Dataset schema:

  code                     string  (length 86 - 54.5k)
  code_codestyle           int64   (0 - 371)
  style_context            string  (length 87 - 49.2k)
  style_context_codestyle  int64   (0 - 349)
  label                    int64   (0 - 1)
code:

import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
code_codestyle: 362
style_context:

from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
style_context_codestyle: 7
label: 0
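A quick sanity check for the style_context sample above: both inputs are sorted on construction and merge_lists simply re-sorts their concatenation, so the merged list is the sorted multiset union of the two tuples. The expected output below is computed by hand, so treat it as illustrative:

# Minimal usage sketch for the SortedLinkedList sample above.
odd = SortedLinkedList((3, 9, -11, 0, 7, 5, 1, -1))
even = SortedLinkedList((4, 6, 2, 0, 8, 10, 3, -2))
merged = merge_lists(odd, even)
print(merged)
# -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10
print(len(merged))  # 16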
code:

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
code_codestyle: 363
style_context:

import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
style_context_codestyle: 7
label: 0
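The deprecation warning in prepare_seq2seq_batch above points at its replacement: a plain call for the inputs plus a text_target call for the labels, which is exactly what the method does internally. A minimal sketch, assuming a RAG checkpoint such as facebook/rag-token-base:

# Sketch of the non-deprecated equivalent of prepare_seq2seq_batch.
# The checkpoint name is illustrative.
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
model_inputs = tokenizer(["How many people live in Paris?"], return_tensors="pt")
labels = tokenizer(text_target=["two million"], return_tensors="pt")
model_inputs["labels"] = labels["input_ids"]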
code:

import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
code_codestyle: 364
style_context:

import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
style_context_codestyle: 7
label: 0
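The _no_encoding_on_file_open regex in this row's code sample is easiest to understand with concrete inputs: the lookbehind requires whitespace before open, and the negative lookahead rejects any line that mentions an allowed keyword (encoding, or a binary/write mode) anywhere after the match position. A small self-contained check:

import re

regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")

# Flagged: a text file opened without an explicit encoding.
assert regexp.search(' open("data.txt") as f') is not None
# Not flagged: the lookahead sees the `encoding` keyword.
assert regexp.search(' open("data.txt", encoding="utf-8") as f') is None
# Not flagged: binary mode `rb` is in the allow-list.
assert regexp.search(' open("data.bin", "rb") as f') is None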
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def A () -> Optional[Any]: """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(lowerCAmelCase__ ): requests.request('''GET''' , '''https://huggingface.co''' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 ) @pytest.mark.integration def A () -> Any: """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('''GET''' , '''https://huggingface.co''' ) def A () -> Dict: """simple docstring""" with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(lowerCAmelCase__ ): http_head('''https://huggingface.co''' )
code_codestyle: 365
style_context:

from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """Return the Liouville function of a positive integer: -1 if it has an odd
    number of prime factors (counted with multiplicity), 1 otherwise."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 7
label: 0
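The style_context sample above is the Liouville function λ(n). A few worked cases, as a hedged sketch that assumes prime_factors returns factors with multiplicity (e.g. prime_factors(12) == [2, 2, 3]):

# Worked examples for liouville_lambda.
# 10 = 2 * 5      -> two factors   (even) -> +1
# 12 = 2 * 2 * 3  -> three factors (odd)  -> -1
# 1 has no prime factors -> zero (even)   -> +1
assert liouville_lambda(10) == 1
assert liouville_lambda(12) == -1
assert liouville_lambda(1) == 1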
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available snake_case_ : List[Any] = { "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"], "tokenization_roc_bert": ["RoCBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Dict = [ "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RoCBertForCausalLM", "RoCBertForMaskedLM", "RoCBertForMultipleChoice", "RoCBertForPreTraining", "RoCBertForQuestionAnswering", "RoCBertForSequenceClassification", "RoCBertForTokenClassification", "RoCBertLayer", "RoCBertModel", "RoCBertPreTrainedModel", "load_tf_weights_in_roc_bert", ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys snake_case_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 366
style_context:

import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device


if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
style_context_codestyle: 7
label: 0
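This row's code sample (and several others in this dump) ends with the same _LazyModule registration: importing the package does not import torch models eagerly; the first attribute access triggers the real import. A stripped-down sketch of the mechanism, a toy rather than the actual _LazyModule implementation:

import importlib
import types


class LazyModule(types.ModuleType):
    """Toy version of the lazy-import idea: resolve submodules on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[name]}")
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so the import only happens once
        return value

The real _LazyModule also handles direct submodule access, __dir__, and pickling, but a caching __getattr__ like the one above is the core of it.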
code:

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
code_codestyle: 367
style_context:

import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
style_context_codestyle: 7
label: 0
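Since main in the converter above forwards its argument list to parser.parse_args, the script can be driven programmatically as well as from the shell. Note that it uses TF1-style tf.Session and tf.get_variable APIs, so it needs TensorFlow 1.x (or tf.compat.v1) to actually run; the paths below are placeholders:

# Programmatic invocation of the converter; all paths are placeholders.
main(
    [
        "--model_name", "bert-base-uncased",
        "--pytorch_model_path", "./pytorch_model.bin",
        "--tf_cache_dir", "./tf_checkpoint",
    ]
)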
code:

from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 368
style_context:

import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
style_context_codestyle: 7
label: 0
code:

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
code_codestyle: 369
style_context:

import comet  # From: unbabel-comet
import torch

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
   author    = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
   title     = {Unbabel's Participation in the WMT20 Metrics Shared Task},
   booktitle = {Proceedings of the Fifth Conference on Machine Translation},
   month     = {November},
   year      = {2020},
   address   = {Online},
   publisher = {Association for Computational Linguistics},
   pages     = {909--918},
}
@inproceedings{rei-etal-2020-comet,
    title = "{COMET}: A Neural Framework for {MT} Evaluation",
    author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
    pages = "2685--2702",
}
"""

_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.

See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""

_KWARGS_DESCRIPTION = """
COMET score.

Args:

`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.

Returns:
    `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
    `scores`: List of scores.

Examples:

    >>> comet_metric = datasets.load_metric('comet')
    >>> # comet_metric = load_metric('comet', 'wmt20-comet-da')  # you can also choose which model to use
    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
    >>> print([round(v, 2) for v in results["scores"]])
    [0.19, 0.92]
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
style_context_codestyle: 7
label: 0
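With the defaults in this row's code sample (image_size=224, patch_size=16, mask_ratio=0.75), the MAE encoder only ever sees a quarter of the patch tokens, which is what makes pretraining cheap. The arithmetic, as a small sketch (the int(...) rounding mirrors how masking counts are typically computed, an assumption rather than this file's code):

# Patch-count arithmetic for the default ViTMAEConfig values above.
image_size, patch_size, mask_ratio = 224, 16, 0.75
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196
num_masked = int(mask_ratio * num_patches)     # 147 patches are masked out
num_visible = num_patches - num_masked         # the encoder processes only 49
print(num_patches, num_masked, num_visible)    # 196 147 49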
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __snake_case : def __init__( self : int , _snake_case : int , _snake_case : str=13 , _snake_case : int=32 , _snake_case : Union[str, Any]=2 , _snake_case : int=3 , _snake_case : List[str]=16 , _snake_case : List[str]=[1, 2, 1] , _snake_case : List[Any]=[2, 2, 4] , _snake_case : List[str]=2 , _snake_case : Union[str, Any]=2.0 , _snake_case : List[Any]=True , _snake_case : List[str]=0.0 , _snake_case : str=0.0 , _snake_case : Optional[Any]=0.1 , _snake_case : str="gelu" , _snake_case : List[str]=False , _snake_case : Tuple=True , _snake_case : Tuple=0.0_2 , _snake_case : Optional[int]=1e-5 , _snake_case : Any=True , _snake_case : Union[str, Any]=None , _snake_case : Optional[int]=True , _snake_case : Any=10 , _snake_case : Any=8 , ): """simple docstring""" UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = image_size UpperCAmelCase_ = patch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = embed_dim UpperCAmelCase_ = depths UpperCAmelCase_ = num_heads UpperCAmelCase_ = window_size UpperCAmelCase_ = mlp_ratio UpperCAmelCase_ = qkv_bias UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = drop_path_rate UpperCAmelCase_ = hidden_act UpperCAmelCase_ = use_absolute_embeddings UpperCAmelCase_ = patch_norm UpperCAmelCase_ = layer_norm_eps UpperCAmelCase_ = initializer_range UpperCAmelCase_ = is_training UpperCAmelCase_ = scope UpperCAmelCase_ = use_labels UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = encoder_stride def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) UpperCAmelCase_ = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self : List[Any]): """simple docstring""" return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : List[str]): """simple docstring""" 
UpperCAmelCase_ = SwinvaModel(config=__snake_case) model.to(__snake_case) model.eval() UpperCAmelCase_ = model(__snake_case) UpperCAmelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) UpperCAmelCase_ = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim)) def lowerCamelCase ( self : List[str] , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : int): """simple docstring""" UpperCAmelCase_ = SwinvaForMaskedImageModeling(config=__snake_case) model.to(__snake_case) model.eval() UpperCAmelCase_ = model(__snake_case) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images UpperCAmelCase_ = 1 UpperCAmelCase_ = SwinvaForMaskedImageModeling(__snake_case) model.to(__snake_case) model.eval() UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) UpperCAmelCase_ = model(__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size)) def lowerCamelCase ( self : Union[str, Any] , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Tuple): """simple docstring""" UpperCAmelCase_ = self.type_sequence_label_size UpperCAmelCase_ = SwinvaForImageClassification(__snake_case) model.to(__snake_case) model.eval() UpperCAmelCase_ = model(__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs UpperCAmelCase_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __snake_case ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): UpperCAmelCase__ : Any = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) UpperCAmelCase__ : List[str] = ( {'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification} if is_torch_available() else {} ) UpperCAmelCase__ : Tuple = False UpperCAmelCase__ : str = False UpperCAmelCase__ : Dict = False UpperCAmelCase__ : Union[str, Any] = False def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = SwinvaModelTester(self) UpperCAmelCase_ = ConfigTester(self , config_class=__snake_case , embed_dim=37) def lowerCamelCase ( self : List[str]): """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case) @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''') def lowerCamelCase ( self : str): """simple docstring""" pass @unittest.skip(reason='''Swinv2 does not use inputs_embeds''') def lowerCamelCase ( self : int): """simple docstring""" pass def lowerCamelCase ( self : Optional[int]): """simple docstring""" 
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(__snake_case) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) UpperCAmelCase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__snake_case , nn.Linear)) def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(__snake_case) UpperCAmelCase_ = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __snake_case) def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = True for model_class in self.all_model_classes: UpperCAmelCase_ = True UpperCAmelCase_ = False UpperCAmelCase_ = True UpperCAmelCase_ = model_class(__snake_case) model.to(__snake_case) model.eval() with torch.no_grad(): UpperCAmelCase_ = model(**self._prepare_for_class(__snake_case , __snake_case)) UpperCAmelCase_ = outputs.attentions UpperCAmelCase_ = len(self.model_tester.depths) self.assertEqual(len(__snake_case) , __snake_case) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCAmelCase_ = True UpperCAmelCase_ = config.window_size**2 UpperCAmelCase_ = model_class(__snake_case) model.to(__snake_case) model.eval() with torch.no_grad(): UpperCAmelCase_ = model(**self._prepare_for_class(__snake_case , __snake_case)) UpperCAmelCase_ = outputs.attentions self.assertEqual(len(__snake_case) , __snake_case) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) UpperCAmelCase_ = len(__snake_case) # Check attention is always last and order is fine UpperCAmelCase_ = True UpperCAmelCase_ = True UpperCAmelCase_ = model_class(__snake_case) model.to(__snake_case) model.eval() with torch.no_grad(): UpperCAmelCase_ = model(**self._prepare_for_class(__snake_case , __snake_case)) if hasattr(self.model_tester , '''num_hidden_states_types'''): UpperCAmelCase_ = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states UpperCAmelCase_ = 2 self.assertEqual(out_len + added_hidden_states , len(__snake_case)) UpperCAmelCase_ = outputs.attentions self.assertEqual(len(__snake_case) , __snake_case) self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def lowerCamelCase ( self : Any , _snake_case : Any , _snake_case : List[str] , _snake_case : List[Any] , _snake_case : Any): """simple docstring""" UpperCAmelCase_ = model_class(__snake_case) model.to(__snake_case) model.eval() with torch.no_grad(): UpperCAmelCase_ = model(**self._prepare_for_class(__snake_case , __snake_case)) UpperCAmelCase_ = outputs.hidden_states UpperCAmelCase_ = getattr( self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths) + 1) self.assertEqual(len(__snake_case) , __snake_case) # Swinv2 has a different seq_length UpperCAmelCase_ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) 
UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) UpperCAmelCase_ = outputs.reshaped_hidden_states self.assertEqual(len(__snake_case) , __snake_case) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = reshaped_hidden_states[0].shape UpperCAmelCase_ = ( reshaped_hidden_states[0].view(__snake_case , __snake_case , height * width).permute(0 , 2 , 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: UpperCAmelCase_ = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case) def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = 3 UpperCAmelCase_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCAmelCase_ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCAmelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: UpperCAmelCase_ = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width)) def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__snake_case) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__snake_case) @slow def lowerCamelCase ( self : Optional[int]): """simple docstring""" for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ = SwinvaModel.from_pretrained(__snake_case) self.assertIsNotNone(__snake_case) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = _config_zero_init(__snake_case) for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(config=__snake_case) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , 
msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class __snake_case ( unittest.TestCase ): @cached_property def lowerCamelCase ( self : Any): """simple docstring""" return ( AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''') if is_vision_available() else None ) @slow def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''').to( __snake_case) UpperCAmelCase_ = self.default_image_processor UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') UpperCAmelCase_ = image_processor(images=__snake_case , return_tensors='''pt''').to(__snake_case) # forward pass with torch.no_grad(): UpperCAmelCase_ = model(**__snake_case) # verify the logits UpperCAmelCase_ = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , __snake_case) UpperCAmelCase_ = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6]).to(__snake_case) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4))
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __snake_case ( a ): UpperCAmelCase__ : Optional[int] = (DPMSolverSinglestepScheduler,) UpperCAmelCase__ : str = (('''num_inference_steps''', 2_5),) def lowerCamelCase ( self : Dict , **_snake_case : Dict): """simple docstring""" UpperCAmelCase_ = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf'''), '''variance_type''': None, } config.update(**_snake_case) return config def lowerCamelCase ( self : Dict , _snake_case : int=0 , **_snake_case : List[Any]): """simple docstring""" UpperCAmelCase_ = dict(self.forward_default_kwargs) UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case) UpperCAmelCase_ = self.dummy_sample UpperCAmelCase_ = 0.1 * sample UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ = self.get_scheduler_config(**_snake_case) UpperCAmelCase_ = scheduler_class(**_snake_case) scheduler.set_timesteps(_snake_case) # copy over dummy past residuals UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_snake_case) UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case) new_scheduler.set_timesteps(_snake_case) # copy over dummy past residuals UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase_ , UpperCAmelCase_ = sample, sample for t in range(_snake_case , time_step + scheduler.config.solver_order + 1): UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample UpperCAmelCase_ = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase ( self : Tuple): """simple docstring""" pass def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any]=0 , **_snake_case : int): """simple docstring""" UpperCAmelCase_ = dict(self.forward_default_kwargs) UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case) UpperCAmelCase_ = self.dummy_sample UpperCAmelCase_ = 0.1 * sample UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ = self.get_scheduler_config() UpperCAmelCase_ = scheduler_class(**_snake_case) scheduler.set_timesteps(_snake_case) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_snake_case) UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case) # copy over dummy past residuals new_scheduler.set_timesteps(_snake_case) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample UpperCAmelCase_ = 
new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase ( self : Dict , _snake_case : int=None , **_snake_case : Optional[Any]): """simple docstring""" if scheduler is None: UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(**_snake_case) UpperCAmelCase_ = scheduler_class(**_snake_case) UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(**_snake_case) UpperCAmelCase_ = scheduler_class(**_snake_case) UpperCAmelCase_ = 10 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter scheduler.set_timesteps(_snake_case) for i, t in enumerate(scheduler.timesteps): UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample return sample def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) UpperCAmelCase_ = 50 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter scheduler.set_timesteps(_snake_case) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:]): UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_5_7_4) < 1e-3 def lowerCamelCase ( self : int): """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=_snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) UpperCAmelCase_ = self.full_loop(scheduler=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3 UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config) UpperCAmelCase_ = self.full_loop(scheduler=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3 def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" self.check_over_configs(thresholding=_snake_case) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , algorithm_type='''dpmsolver++''' , solver_order=_snake_case , solver_type=_snake_case , ) def lowerCamelCase ( self : Dict): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_snake_case) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , ) UpperCAmelCase_ = self.full_loop( solver_order=_snake_case , 
solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , ) assert not torch.isnan(_snake_case).any(), "Samples have nan numbers" def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" self.check_over_configs(lower_order_final=_snake_case) self.check_over_configs(lower_order_final=_snake_case) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" self.check_over_configs(lambda_min_clipped=-float('''inf''')) self.check_over_configs(lambda_min_clipped=-5.1) def lowerCamelCase ( self : int): """simple docstring""" self.check_over_configs(variance_type=_snake_case) self.check_over_configs(variance_type='''learned_range''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=_snake_case , time_step=0) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = self.full_loop() UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3 def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_2_4_8) < 1e-3 def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''') UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.1_4_5_3) < 1e-3 def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.0_6_4_9) < 1e-3 def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(thresholding=_snake_case , dynamic_thresholding_ratio=0) UpperCAmelCase_ = scheduler_class(**_snake_case) UpperCAmelCase_ = 10 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter.half() scheduler.set_timesteps(_snake_case) for i, t in enumerate(scheduler.timesteps): UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample assert sample.dtype == torch.floataa
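# Usage sketch: the denoising loop that the full_loop helper above encodes,
# written against the public diffusers API. The noise predictor here is a
# hypothetical stand-in; a real diffusion UNet would take its place.
import torch
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 32, 32)  # initial noisy sample

def noise_predictor(x, t):  # hypothetical epsilon-prediction model
    return torch.zeros_like(x)

for t in scheduler.timesteps:
    residual = noise_predictor(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample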
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __snake_case ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): UpperCAmelCase__ : Dict = DebertaTokenizer UpperCAmelCase__ : List[Any] = True UpperCAmelCase__ : Tuple = DebertaTokenizerFast def lowerCamelCase ( self : Optional[int]): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] UpperCAmelCase_ = dict(zip(A_ , range(len(A_)))) UpperCAmelCase_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] UpperCAmelCase_ = {'''unk_token''': '''[UNK]'''} UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp: fp.write(json.dumps(A_) + '''\n''') with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp: fp.write('''\n'''.join(A_)) def lowerCamelCase ( self : int , **_snake_case : Any): """simple docstring""" kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname , **A_) def lowerCamelCase ( self : Optional[int] , _snake_case : str): """simple docstring""" UpperCAmelCase_ = '''lower newer''' UpperCAmelCase_ = '''lower newer''' return input_text, output_text def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = '''lower newer''' UpperCAmelCase_ = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] UpperCAmelCase_ = tokenizer.tokenize(A_) self.assertListEqual(A_ , A_) UpperCAmelCase_ = tokens + [tokenizer.unk_token] UpperCAmelCase_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_) , A_) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = tokenizer('''Hello''' , '''World''') UpperCAmelCase_ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , A_) @slow def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''') UpperCAmelCase_ = tokenizer.encode('''sequence builders''' , add_special_tokens=A_) UpperCAmelCase_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=A_) UpperCAmelCase_ = tokenizer.encode( '''sequence builders''' , add_special_tokens=A_ , add_prefix_space=A_) UpperCAmelCase_ = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=A_ , add_prefix_space=A_) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(A_) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(A_ , A_) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = [self.tokenizer_class] if self.test_rust_tokenizer: 
tokenizer_classes.append(self.rust_tokenizer_class) for tokenizer_class in tokenizer_classes: UpperCAmelCase_ = tokenizer_class.from_pretrained('''microsoft/deberta-base''') UpperCAmelCase_ = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] UpperCAmelCase_ = tokenizer(A_ , padding=A_) UpperCAmelCase_ = [tokenizer.decode(A_ , skip_special_tokens=A_) for seq in encoding['''input_ids''']] # fmt: off UpperCAmelCase_ = { '''input_ids''': [ [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on UpperCAmelCase_ = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , A_) for expected, decoded in zip(A_ , A_): self.assertEqual(A_ , A_)
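# Usage sketch: the encode/decode round trip that the tests above assert,
# against the real "microsoft/deberta-base" checkpoint (assumes Hub access).
from transformers import DebertaTokenizer

tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
ids = tokenizer.encode("lower newer")  # special tokens added by default
print(ids)
print(tokenizer.decode(ids, skip_special_tokens=True))  # -> "lower newer"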
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
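# Usage sketch: from the caller's side the lazy structure above is invisible;
# names resolve on first attribute access (assumes torch is installed so the
# torch-gated branch imports).
from transformers import DeiTConfig, DeiTModel

model = DeiTModel(DeiTConfig())  # randomly initialized, no Hub download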
import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def A (__A : List[Any] , __A : Optional[Any] , __A : Optional[Any] , __A : Dict=None , __A : Optional[Any]=None , __A : Optional[int]=None , __A : Optional[Any]=None , __A : Any=None , ) -> Dict: """simple docstring""" if attention_mask is None: UpperCAmelCase_ = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: UpperCAmelCase_ = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: UpperCAmelCase_ = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=__A ) if decoder_head_mask is None: UpperCAmelCase_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__A ) if cross_attn_head_mask is None: UpperCAmelCase_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__A ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class __snake_case : def __init__( self : List[str] , _snake_case : Union[str, Any] , _snake_case : int=13 , _snake_case : Dict=7 , _snake_case : Tuple=True , _snake_case : List[Any]=False , _snake_case : Tuple=99 , _snake_case : Optional[Any]=16 , _snake_case : Optional[int]=2 , _snake_case : List[Any]=4 , _snake_case : Any=4 , _snake_case : Dict="relu" , _snake_case : Optional[Any]=0.1 , _snake_case : int=0.1 , _snake_case : Tuple=0.0 , _snake_case : int=0.0 , _snake_case : Any=20 , _snake_case : Union[str, Any]=2 , _snake_case : str=1 , _snake_case : Union[str, Any]=0 , ): """simple docstring""" UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = encoder_layerdrop UpperCAmelCase_ = decoder_layerdrop UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = eos_token_id UpperCAmelCase_ = pad_token_id UpperCAmelCase_ = bos_token_id def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) UpperCAmelCase_ = self.eos_token_id # Eos Token UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, 
there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input UpperCAmelCase_ = input_ids.clamp(self.pad_token_id + 1) UpperCAmelCase_ = decoder_input_ids.clamp(self.pad_token_id + 1) UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = prepare_mam_aaa_inputs_dict(_snake_case , _snake_case , _snake_case) return config, inputs_dict def lowerCamelCase ( self : int): """simple docstring""" return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs() return config, inputs_dict def lowerCamelCase ( self : str , _snake_case : List[Any] , _snake_case : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = MaMaaaModel(config=_snake_case).get_decoder().to(_snake_case).eval() UpperCAmelCase_ = inputs_dict['''input_ids'''] UpperCAmelCase_ = inputs_dict['''attention_mask'''] UpperCAmelCase_ = inputs_dict['''head_mask'''] # first forward pass UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , head_mask=_snake_case , use_cache=_snake_case) UpperCAmelCase_ , UpperCAmelCase_ = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size) UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , 2) # append to next input_ids and UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1) UpperCAmelCase_ = torch.cat([attention_mask, next_attn_mask] , dim=-1) UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case)['''last_hidden_state'''] UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , past_key_values=_snake_case)[ '''last_hidden_state''' ] # select random slice UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1]).item() UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-2)) def lowerCamelCase ( self : Optional[int] , _snake_case : str , _snake_case : List[Any]): """simple docstring""" UpperCAmelCase_ = MaMaaaModel(config=_snake_case).to(_snake_case).eval() UpperCAmelCase_ = model(**_snake_case) UpperCAmelCase_ = outputs.encoder_last_hidden_state UpperCAmelCase_ = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = model.get_encoder() encoder.save_pretrained(_snake_case) UpperCAmelCase_ = MaMaaaEncoder.from_pretrained(_snake_case).to(_snake_case) UpperCAmelCase_ = encoder(inputs_dict['''input_ids'''] , attention_mask=inputs_dict['''attention_mask'''])[ 0 ] 
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = model.get_decoder() decoder.save_pretrained(_snake_case) UpperCAmelCase_ = MaMaaaDecoder.from_pretrained(_snake_case).to(_snake_case) UpperCAmelCase_ = decoder( input_ids=inputs_dict['''decoder_input_ids'''] , attention_mask=inputs_dict['''decoder_attention_mask'''] , encoder_hidden_states=_snake_case , encoder_attention_mask=inputs_dict['''attention_mask'''] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3) @require_torch class __snake_case ( a , a , a , unittest.TestCase ): UpperCAmelCase__ : List[str] = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) UpperCAmelCase__ : List[str] = (MaMaaaForConditionalGeneration,) if is_torch_available() else () UpperCAmelCase__ : Union[str, Any] = ( { '''conversational''': MaMaaaForConditionalGeneration, '''feature-extraction''': MaMaaaModel, '''summarization''': MaMaaaForConditionalGeneration, '''text2text-generation''': MaMaaaForConditionalGeneration, '''translation''': MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) UpperCAmelCase__ : Optional[int] = True UpperCAmelCase__ : Any = True UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : Optional[int] = False def lowerCamelCase ( self : Union[str, Any] , _snake_case : List[Any] , _snake_case : Dict , _snake_case : Any , _snake_case : List[Any] , _snake_case : Tuple): """simple docstring""" if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. 
return True return False def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = MaMaaaModelTester(self) UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case) def lowerCamelCase ( self : List[Any]): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(_snake_case) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_snake_case) UpperCAmelCase_ , UpperCAmelCase_ = model_class.from_pretrained(_snake_case , output_loading_info=_snake_case) self.assertEqual(info['''missing_keys'''] , []) def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*_snake_case) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*_snake_case) def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): UpperCAmelCase_ = model_class(_snake_case) model.to(_snake_case) model.eval() UpperCAmelCase_ = copy.deepcopy(self._prepare_for_class(_snake_case , _snake_case)) if not self.is_encoder_decoder: UpperCAmelCase_ = inputs['''input_ids'''] del inputs["input_ids"] else: UpperCAmelCase_ = inputs['''input_ids'''] UpperCAmelCase_ = inputs.get('''decoder_input_ids''' , _snake_case) del inputs["input_ids"] inputs.pop('''decoder_input_ids''' , _snake_case) UpperCAmelCase_ = model.get_input_embeddings() if not self.is_encoder_decoder: UpperCAmelCase_ = wte(_snake_case) else: UpperCAmelCase_ = wte(_snake_case) UpperCAmelCase_ = wte(_snake_case) with torch.no_grad(): model(**_snake_case)[0] def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() UpperCAmelCase_ = input_dict['''input_ids'''] UpperCAmelCase_ = input_ids.ne(1).to(_snake_case) UpperCAmelCase_ = MaMaaaForConditionalGeneration(_snake_case).eval().to(_snake_case) if torch_device == "cuda": model.half() model.generate(_snake_case , attention_mask=_snake_case) model.generate(num_beams=4 , do_sample=_snake_case , early_stopping=_snake_case , num_return_sequences=3) def A (__A : List[str] ) -> Any: """simple docstring""" return torch.tensor(__A , dtype=torch.long , device=__A ) snake_case_ : Tuple = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class __snake_case ( unittest.TestCase ): @cached_property def lowerCamelCase ( self : Tuple): """simple docstring""" return MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = MaMaaaModel.from_pretrained('''facebook/m2m100_418M''').to(_snake_case) UpperCAmelCase_ = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]]) UpperCAmelCase_ = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]]) UpperCAmelCase_ = prepare_mam_aaa_inputs_dict(model.config , _snake_case , _snake_case) with torch.no_grad(): UpperCAmelCase_ = model(**_snake_case)[0] UpperCAmelCase_ = torch.Size((1, 11, 1024)) 
self.assertEqual(output.shape , _snake_case) # change to expected output here UpperCAmelCase_ = torch.tensor( [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=_snake_case) self.assertTrue(torch.allclose(output[:, :3, :3] , _snake_case , atol=_snake_case)) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''').to(_snake_case) # change to intended input UpperCAmelCase_ = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]]) UpperCAmelCase_ = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]]) UpperCAmelCase_ = prepare_mam_aaa_inputs_dict(model.config , _snake_case , _snake_case) with torch.no_grad(): UpperCAmelCase_ = model(**_snake_case)[0] UpperCAmelCase_ = torch.Size((1, 11, model.config.vocab_size)) self.assertEqual(output.shape , _snake_case) # change to expected output here UpperCAmelCase_ = torch.tensor( [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=_snake_case) self.assertTrue(torch.allclose(output[:, :3, :3] , _snake_case , atol=_snake_case)) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''').to(_snake_case) UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' , src_lang='''fr''' , tgt_lang='''en''') UpperCAmelCase_ = [ '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent''' ''' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de''' ''' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.''', ] # The below article tests that we don't add any hypotheses outside of the top n_beams UpperCAmelCase_ = tokenizer(_snake_case , padding=_snake_case , return_tensors='''pt''') UpperCAmelCase_ = model.generate( input_ids=dct['''input_ids'''].to(_snake_case) , attention_mask=dct['''attention_mask'''].to(_snake_case) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('''en''') , ) UpperCAmelCase_ = [ '''The NSA case highlights the total absence of intelligence debate''', '''I think there are two levels of response from the French government.''', '''When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.''' ''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all''' ''' communications in France.''', ] UpperCAmelCase_ = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=_snake_case , skip_special_tokens=_snake_case) assert generated == expected_en
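# Usage sketch: the fr->en translation that the batch-generation test above
# checks, written with the public class names (M2M100Tokenizer /
# M2M100ForConditionalGeneration; this file refers to them through aliases).
# Assumes Hub access.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

batch = tokenizer("La vie est belle.", return_tensors="pt")
generated = model.generate(**batch, num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))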
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets snake_case_ : Dict = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n" snake_case_ : List[str] = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n" snake_case_ : List[Any] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... 
[\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... 
case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): def lowerCamelCase ( self : Tuple): """simple docstring""" if version.parse(scb.__version__) < version.parse('''1.4.12'''): raise ImportWarning( '''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n''' '''You can install it with `pip install "sacrebleu>=1.4.12"`.''') return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''') , id='''references'''), }) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[ '''https://github.com/jhclark/tercom''', ] , ) def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , ): """simple docstring""" UpperCAmelCase_ = len(references[0]) if any(len(_snake_case) != references_per_prediction for refs in references): raise ValueError('''Sacrebleu requires the same number of references for each prediction''') UpperCAmelCase_ = [[refs[i] for refs in references] for i in range(_snake_case)] UpperCAmelCase_ = TER( normalized=_snake_case , no_punct=_snake_case , asian_support=_snake_case , case_sensitive=_snake_case , ) UpperCAmelCase_ = sb_ter.corpus_score(_snake_case , _snake_case) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
import argparse from collections import defaultdict def A (__A : List[str] , __A : Dict , __A : Optional[int] , __A : Tuple , __A : List[Any] ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__A , '''r''' ) as f: UpperCAmelCase_ = f.readlines() UpperCAmelCase_ = F"""class {class_name}(""" UpperCAmelCase_ = F"""{4 * " "}def {test_name}(""" UpperCAmelCase_ = F"""{8 * " "}{correct_line.split()[0]}""" UpperCAmelCase_ = F"""{16 * " "}{correct_line.split()[0]}""" UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = 0 UpperCAmelCase_ = 0 UpperCAmelCase_ = [] for line in lines: if line.startswith(__A ): UpperCAmelCase_ = True elif in_class and line.startswith(__A ): UpperCAmelCase_ = True elif in_class and in_func and (line.startswith(__A ) or line.startswith(__A )): UpperCAmelCase_ = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: UpperCAmelCase_ = True if in_class and in_func and in_line: if ")" not in line: continue else: UpperCAmelCase_ = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) UpperCAmelCase_ = UpperCAmelCase_ = UpperCAmelCase_ = UpperCAmelCase_ = False else: new_lines.append(__A ) with open(__A , '''w''' ) as f: for line in new_lines: f.write(__A ) def A (__A : Dict , __A : Dict=None ) -> Optional[int]: """simple docstring""" if fail is not None: with open(__A , '''r''' ) as f: UpperCAmelCase_ = {l.strip() for l in f.readlines()} else: UpperCAmelCase_ = None with open(__A , '''r''' ) as f: UpperCAmelCase_ = f.readlines() UpperCAmelCase_ = defaultdict(__A ) for line in correct_lines: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = line.split(''';''' ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__A , __A , __A , __A , __A ) if __name__ == "__main__": snake_case_ : Tuple = argparse.ArgumentParser() parser.add_argument("--correct_filename", help="filename of tests with expected result") parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None) snake_case_ : Union[str, Any] = parser.parse_args() main(args.correct_filename, args.fail_filename)
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # seed so the generated waveform is deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
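# Usage sketch: the same calls the tests above make, outside the test
# harness. to_raw() returns the generated waveform as a 1-D torch tensor.
import torch
from transformers import load_tool

torch.manual_seed(0)
tts = load_tool("text-to-speech")
tts.setup()
waveform = tts("hey").to_raw()
print(waveform.shape)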
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because Lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )

    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
"""simple docstring""" import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants snake_case_ : List[Any] = Mapping[str, np.ndarray] snake_case_ : Any = Mapping[str, Any] # Is a nested dict. snake_case_ : Dict = 0.01 @dataclasses.dataclass(frozen=a ) class __snake_case : UpperCAmelCase__ : np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. UpperCAmelCase__ : np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. UpperCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. UpperCAmelCase__ : np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. UpperCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions UpperCAmelCase__ : Optional[np.ndarray] = None # Optional remark about the protein. Included as a comment in output PDB # files UpperCAmelCase__ : Optional[str] = None # Templates used to generate this protein (prediction-only) UpperCAmelCase__ : Optional[Sequence[str]] = None # Chain corresponding to each parent UpperCAmelCase__ : Optional[Sequence[int]] = None def A (__A : str ) -> Protein: """simple docstring""" UpperCAmelCase_ = R'''(\[[A-Z]+\]\n)''' UpperCAmelCase_ = [tag.strip() for tag in re.split(__A , __A ) if len(__A ) > 0] UpperCAmelCase_ = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] ) UpperCAmelCase_ = ['''N''', '''CA''', '''C'''] UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None for g in groups: if "[PRIMARY]" == g[0]: UpperCAmelCase_ = g[1][0].strip() for i in range(len(__A ) ): if seq[i] not in residue_constants.restypes: UpperCAmelCase_ = '''X''' # FIXME: strings are immutable UpperCAmelCase_ = np.array( [residue_constants.restype_order.get(__A , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: UpperCAmelCase_ = [] for axis in range(3 ): tertiary.append(list(map(__A , g[1][axis].split() ) ) ) UpperCAmelCase_ = np.array(__A ) UpperCAmelCase_ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(__A ): UpperCAmelCase_ = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: UpperCAmelCase_ = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) ) UpperCAmelCase_ = np.zeros( ( len(__A ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(__A ): UpperCAmelCase_ = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=__A , atom_mask=__A , aatype=__A , residue_index=np.arange(len(__A ) ) , b_factors=__A , ) def A (__A : Protein , __A : int = 0 ) -> List[str]: """simple docstring""" UpperCAmelCase_ = [] UpperCAmelCase_ = prot.remark if remark is not None: pdb_headers.append(F"""REMARK {remark}""" ) UpperCAmelCase_ = prot.parents UpperCAmelCase_ = prot.parents_chain_index if parents is not None and parents_chain_index is not None: UpperCAmelCase_ = [p for i, p in zip(__A , __A ) if i == chain_id] if parents is None or len(__A ) == 0: UpperCAmelCase_ = ['''N/A'''] 
pdb_headers.append(F"""PARENT {" ".join(__A )}""" ) return pdb_headers def A (__A : Protein , __A : str ) -> str: """simple docstring""" UpperCAmelCase_ = [] UpperCAmelCase_ = pdb_str.split('''\n''' ) UpperCAmelCase_ = prot.remark if remark is not None: out_pdb_lines.append(F"""REMARK {remark}""" ) UpperCAmelCase_ = 42 if prot.parents is not None and len(prot.parents ) > 0: UpperCAmelCase_ = [] if prot.parents_chain_index is not None: UpperCAmelCase_ = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(__A ) , [] ) parent_dict[str(__A )].append(__A ) UpperCAmelCase_ = max([int(__A ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): UpperCAmelCase_ = parent_dict.get(str(__A ) , ['''N/A'''] ) parents_per_chain.append(__A ) else: parents_per_chain.append(list(prot.parents ) ) else: UpperCAmelCase_ = [['''N/A''']] def make_parent_line(__A : Sequence[str] ) -> str: return F"""PARENT {" ".join(__A )}""" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) UpperCAmelCase_ = 0 for i, l in enumerate(__A ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(__A ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(__A ): UpperCAmelCase_ = parents_per_chain[chain_counter] else: UpperCAmelCase_ = ['''N/A'''] out_pdb_lines.append(make_parent_line(__A ) ) return "\n".join(__A ) def A (__A : Protein ) -> str: """simple docstring""" UpperCAmelCase_ = residue_constants.restypes + ['''X'''] def res_atoa(__A : int ) -> str: return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' ) UpperCAmelCase_ = residue_constants.atom_types UpperCAmelCase_ = [] UpperCAmelCase_ = prot.atom_mask UpperCAmelCase_ = prot.aatype UpperCAmelCase_ = prot.atom_positions UpperCAmelCase_ = prot.residue_index.astype(np.intaa ) UpperCAmelCase_ = prot.b_factors UpperCAmelCase_ = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('''Invalid aatypes.''' ) UpperCAmelCase_ = get_pdb_headers(__A ) if len(__A ) > 0: pdb_lines.extend(__A ) UpperCAmelCase_ = aatype.shape[0] UpperCAmelCase_ = 1 UpperCAmelCase_ = 0 UpperCAmelCase_ = string.ascii_uppercase UpperCAmelCase_ = None # Add all atom sites. for i in range(__A ): UpperCAmelCase_ = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(__A , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue UpperCAmelCase_ = '''ATOM''' UpperCAmelCase_ = atom_name if len(__A ) == 4 else F""" {atom_name}""" UpperCAmelCase_ = '''''' UpperCAmelCase_ = '''''' UpperCAmelCase_ = 1.00 UpperCAmelCase_ = atom_name[0] # Protein supports only C, N, O, S, this works. UpperCAmelCase_ = '''''' UpperCAmelCase_ = '''A''' if chain_index is not None: UpperCAmelCase_ = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! UpperCAmelCase_ = ( F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" F"""{res_name_a:>3} {chain_tag:>1}""" F"""{residue_index[i]:>4}{insertion_code:>1} """ F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" F"""{occupancy:>6.2f}{b_factor:>6.2f} """ F"""{element:>2}{charge:>2}""" ) pdb_lines.append(__A ) atom_index += 1 UpperCAmelCase_ = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: UpperCAmelCase_ = True UpperCAmelCase_ = chain_index[i + 1] if should_terminate: # Close the chain. 
UpperCAmelCase_ = '''TER''' UpperCAmelCase_ = ( F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(__A ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(__A , __A ) ) pdb_lines.append('''END''' ) pdb_lines.append('''''' ) return "\n".join(__A ) def A (__A : Protein ) -> np.ndarray: """simple docstring""" return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def A (__A : FeatureDict , __A : ModelOutput , __A : Optional[np.ndarray] = None , __A : Optional[np.ndarray] = None , __A : Optional[str] = None , __A : Optional[Sequence[str]] = None , __A : Optional[Sequence[int]] = None , ) -> Protein: """simple docstring""" return Protein( aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=__A , remark=__A , parents=__A , parents_chain_index=__A , )
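# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the module above: the ATOM records written
# by the PDB serializer are strictly columnar, so every literal space in the
# f-string matters. The sample values below are invented purely to show the
# alignment; the trailing-space runs follow the standard 80-column PDB layout.
def _demo_atom_line() -> str:
    atom_name = "CA"
    record_type = "ATOM"
    name = atom_name if len(atom_name) == 4 else f" {atom_name}"
    atom_index, alt_loc, res_name, chain_tag = 1, "", "GLY", "A"
    res_index, insertion_code = 1, ""
    x, y, z = 11.104, 6.134, -6.504
    occupancy, b_factor, element, charge = 1.00, 0.00, "C", ""
    return (
        f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
        f"{res_name:>3} {chain_tag:>1}"
        f"{res_index:>4}{insertion_code:>1}   "
        f"{x:>8.3f}{y:>8.3f}{z:>8.3f}"
        f"{occupancy:>6.2f}{b_factor:>6.2f}          "
        f"{element:>2}{charge:>2}"
    )


assert len(_demo_atom_line()) == 80  # fixed-width PDB record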
353
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __snake_case : @staticmethod def lowerCamelCase ( *_snake_case : List[str] , **_snake_case : str): """simple docstring""" pass @is_pipeline_test @require_torch @require_vision class __snake_case ( unittest.TestCase ): UpperCAmelCase__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def lowerCamelCase ( self : Any , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''') UpperCAmelCase_ = [ { '''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''), '''question''': '''How many cats are there?''', }, { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''question''': '''How many cats are there?''', }, ] return vqa_pipeline, examples def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , _snake_case : Optional[int]): """simple docstring""" UpperCAmelCase_ = vqa_pipeline(_snake_case , top_k=1) self.assertEqual( _snake_case , [ [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}], [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}], ] , ) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''') UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' UpperCAmelCase_ = '''How many cats are there?''' UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2) self.assertEqual( _snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}]) UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2) self.assertEqual( _snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}]) @slow @require_torch def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''') UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' UpperCAmelCase_ = '''How many cats are there?''' UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2) self.assertEqual( nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]) UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2) self.assertEqual( nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]) UpperCAmelCase_ = vqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2) self.assertEqual( nested_simplify(_snake_case , decimals=4) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , ) @require_tf 
@unittest.skip('''Visual question answering not implemented in TF''') def lowerCamelCase ( self : Tuple): """simple docstring""" pass
7
0
import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : Tuple = OpenAIGPTTokenizer UpperCAmelCase__ : Any = OpenAIGPTTokenizerFast UpperCAmelCase__ : Union[str, Any] = True UpperCAmelCase__ : Optional[Any] = False def lowerCamelCase ( self : str): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case)))) UpperCAmelCase_ = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', ''''''] UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file''']) with open(self.vocab_file , '''w''') as fp: fp.write(json.dumps(_snake_case)) with open(self.merges_file , '''w''') as fp: fp.write('''\n'''.join(_snake_case)) def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any]): """simple docstring""" return "lower newer", "lower newer" def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file) UpperCAmelCase_ = '''lower''' UpperCAmelCase_ = ['''low''', '''er</w>'''] UpperCAmelCase_ = tokenizer.tokenize(_snake_case) self.assertListEqual(_snake_case , _snake_case) UpperCAmelCase_ = tokens + ['''<unk>'''] UpperCAmelCase_ = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case) , _snake_case) def lowerCamelCase ( self : int , _snake_case : List[Any]=15): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""): UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case) # Simple input UpperCAmelCase_ = '''This is a simple input''' UpperCAmelCase_ = ['''This is a simple input 1''', '''This is a simple input 2'''] UpperCAmelCase_ = ('''This is a simple input''', '''This is a pair''') UpperCAmelCase_ = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(_snake_case , tokenizer_r.encode , _snake_case , max_length=_snake_case , padding='''max_length''') # Simple input self.assertRaises(_snake_case , tokenizer_r.encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''') # Simple input self.assertRaises( _snake_case , tokenizer_r.batch_encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''' , ) # Pair input self.assertRaises(_snake_case , tokenizer_r.encode , _snake_case , max_length=_snake_case , padding='''max_length''') # Pair input self.assertRaises(_snake_case , tokenizer_r.encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''') # Pair input self.assertRaises( _snake_case , tokenizer_r.batch_encode_plus , _snake_case , 
max_length=_snake_case , padding='''max_length''' , ) def lowerCamelCase ( self : Tuple): """simple docstring""" pass @require_ftfy @require_spacy @require_tokenizers class __snake_case ( a ): pass
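# ---------------------------------------------------------------------------
# Hand-rolled illustration (not the actual OpenAIGPTTokenizer code path) of
# why the fixture above tokenizes "lower" into ["low", "er</w>"]: the word is
# split into characters with a </w> end-of-word marker, then the merge rules
# from the merges file are applied. Real BPE repeatedly merges the
# best-ranked adjacent pair; on this toy vocabulary a single pass per rule
# gives the same result.
def _demo_bpe(word):
    merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]  # from the fixture above
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for a, b in merges:
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols


assert _demo_bpe("lower") == ["low", "er</w>"]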
354
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by testing the lowest bit and shifting right."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark both implementations on a few sample inputs."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup
        )
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
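# Worked example for the counters above: 25 is 0b11001, so both approaches
# report three set bits. Brian Kernighan's identity n & (n - 1) clears the
# lowest set bit, so the loop runs once per set bit rather than once per bit
# position:
#     25 (0b11001) -> 24 (0b11000) -> 16 (0b10000) -> 0   (3 iterations)
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3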
7
0
from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def A (__A : Namespace ) -> Dict: """simple docstring""" return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) snake_case_ : int = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n" class __snake_case ( a ): @staticmethod def lowerCamelCase ( _snake_case : ArgumentParser): """simple docstring""" UpperCAmelCase_ = parser.add_parser( '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , ) train_parser.add_argument('''--model_type''' , type=_snake_case , required=_snake_case , help='''Model\'s type.''') train_parser.add_argument( '''--tf_checkpoint''' , type=_snake_case , required=_snake_case , help='''TensorFlow checkpoint path or folder.''') train_parser.add_argument( '''--pytorch_dump_output''' , type=_snake_case , required=_snake_case , help='''Path to the PyTorch saved model output.''') train_parser.add_argument('''--config''' , type=_snake_case , default='''''' , help='''Configuration file path or folder.''') train_parser.add_argument( '''--finetuning_task_name''' , type=_snake_case , default=_snake_case , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , ) train_parser.set_defaults(func=_snake_case) def __init__( self : Optional[int] , _snake_case : str , _snake_case : str , _snake_case : str , _snake_case : str , _snake_case : str , *_snake_case : List[Any] , ): """simple docstring""" UpperCAmelCase_ = logging.get_logger('''transformers-cli/converting''') self._logger.info(F"""Loading model {model_type}""") UpperCAmelCase_ = model_type UpperCAmelCase_ = tf_checkpoint UpperCAmelCase_ = pytorch_dump_output UpperCAmelCase_ = config UpperCAmelCase_ = finetuning_task_name def lowerCamelCase ( self : Optional[Any]): """simple docstring""" if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_snake_case) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_snake_case) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_snake_case) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(_snake_case) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , 
self._config , self._pytorch_dump_output) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_snake_case) if "ckpt" in self._tf_checkpoint.lower(): UpperCAmelCase_ = self._tf_checkpoint UpperCAmelCase_ = '''''' else: UpperCAmelCase_ = self._tf_checkpoint UpperCAmelCase_ = '''''' convert_transfo_xl_checkpoint_to_pytorch( _snake_case , self._config , self._pytorch_dump_output , _snake_case) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_snake_case) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_snake_case) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) else: raise ValueError( '''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''')
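# ---------------------------------------------------------------------------
# Typical shell invocation for this subcommand (the paths are placeholders,
# not real files); the static parser method above wires these flags into the
# ConvertCommand constructor, and the run method dispatches on --model_type
# to the matching conversion script:
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint /path/to/bert_model.ckpt \
#       --config /path/to/bert_config.json \
#       --pytorch_dump_output /path/to/pytorch_dump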
355
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = 10 def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = [1, 2, 3, 4] UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.''' UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case) self.assertEqual(_snake_case , []) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''''' UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case) self.assertEqual(_snake_case , []) self.assertEqual(_snake_case , []) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = ( '''It was the year of Our Lord one thousand seven hundred and ''' '''seventy-five\n\nSpiritual revelations were conceded to England ''' '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times''' ) UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case) UpperCAmelCase_ = [ '''It was the year of Our Lord one thousand seven hundred and seventy-five.''', '''Spiritual revelations were conceded to England at that favoured period, as at this.''', ] self.assertEqual(_snake_case , _snake_case) UpperCAmelCase_ = ['''It was the best of times.'''] self.assertEqual(_snake_case , _snake_case) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = torch.tensor([1, 2, 3, 4]) UpperCAmelCase_ = torch.tensor([1, 1, 1, 1]) np.testing.assert_array_equal(build_mask(_snake_case , 0).numpy() , expected.numpy()) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23]) UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0]) np.testing.assert_array_equal(build_mask(_snake_case , 23).numpy() , expected.numpy()) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1]) UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0]) np.testing.assert_array_equal(build_mask(_snake_case , 1).numpy() , expected.numpy()) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = 101 UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]]) UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]]) UpperCAmelCase_ = compute_token_type_ids(_snake_case , _snake_case) np.testing.assert_array_equal(_snake_case , _snake_case)
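# ---------------------------------------------------------------------------
# The tests above pin down truncate_or_pad's contract: shorter sequences are
# right-padded with the pad token, longer ones are truncated, and
# exact-length ones pass through. A minimal sketch consistent with those
# tests (the real helper lives in utils_summarization and may differ in
# detail):
def _truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return list(sequence) + [pad_token_id] * (block_size - len(sequence))


assert _truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]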
7
0
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel snake_case_ : Union[str, Any] = "0.12" # assumed parallelism: 8 @require_flax @is_staging_test class __snake_case ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls : Tuple): """simple docstring""" UpperCAmelCase_ = TOKEN HfFolder.save_token(_snake_case) @classmethod def lowerCamelCase ( cls : List[Any]): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-model-flax''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''') except HTTPError: pass def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37) UpperCAmelCase_ = FlaxBertModel(_snake_case) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token) UpperCAmelCase_ = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""") UpperCAmelCase_ = flatten_dict(unfreeze(model.params)) UpperCAmelCase_ = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): UpperCAmelCase_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1e-3 , msg=F"""{key} not identical""") # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_snake_case , repo_id='''test-model-flax''' , push_to_hub=_snake_case , use_auth_token=self._token) UpperCAmelCase_ = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""") UpperCAmelCase_ = flatten_dict(unfreeze(model.params)) UpperCAmelCase_ = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): UpperCAmelCase_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1e-3 , msg=F"""{key} not identical""") def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37) UpperCAmelCase_ = FlaxBertModel(_snake_case) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token) UpperCAmelCase_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''') UpperCAmelCase_ = flatten_dict(unfreeze(model.params)) UpperCAmelCase_ = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): UpperCAmelCase_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1e-3 , msg=F"""{key} not identical""") # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( _snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_snake_case , use_auth_token=self._token) UpperCAmelCase_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''') UpperCAmelCase_ = flatten_dict(unfreeze(model.params)) UpperCAmelCase_ = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): UpperCAmelCase_ = (base_params[key] - 
new_params[key]).sum().item() self.assertLessEqual(_snake_case , 1e-3 , msg=F"""{key} not identical""") def A (__A : Optional[int] , __A : Optional[Any] ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ = True UpperCAmelCase_ = flatten_dict(modela.params ) UpperCAmelCase_ = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: UpperCAmelCase_ = False return models_are_equal @require_flax class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''') UpperCAmelCase_ = FlaxBertModel(_snake_case) UpperCAmelCase_ = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_snake_case , _snake_case)) with self.assertRaises(_snake_case): UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case) UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case) self.assertTrue(check_models_equal(_snake_case , _snake_case)) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''') UpperCAmelCase_ = FlaxBertModel(_snake_case) UpperCAmelCase_ = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_snake_case , _snake_case) , max_shard_size='''10KB''') with self.assertRaises(_snake_case): UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case) UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case) self.assertTrue(check_models_equal(_snake_case , _snake_case)) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = '''bert''' UpperCAmelCase_ = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(_snake_case): UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case) UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case) self.assertIsNotNone(_snake_case) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''bert''' UpperCAmelCase_ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(_snake_case): UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case) UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case) self.assertIsNotNone(_snake_case)
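# ---------------------------------------------------------------------------
# check_models_equal above boils down to summing absolute differences over
# the flattened parameter trees. A self-contained toy illustration of the
# same tolerance check with plain numpy (nested dicts standing in for
# model.params):
import numpy as np

_params_a = {"dense": {"kernel": np.ones((2, 2))}}
_params_b = {"dense": {"kernel": np.ones((2, 2)) + 1e-6}}


def _sum_abs_diff(a, b):
    if isinstance(a, dict):
        return sum(_sum_abs_diff(a[k], b[k]) for k in a)
    return float(np.sum(np.abs(a - b)))


assert _sum_abs_diff(_params_a, _params_b) <= 1e-4  # within tolerance -> "equal"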
356
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): snake_case_ : Any = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right snake_case_ : Optional[Any] = 128022 snake_case_ : Optional[int] = 128028 @require_sentencepiece class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : List[str] = MaMaaaTokenizer UpperCAmelCase__ : int = False UpperCAmelCase__ : Dict = False UpperCAmelCase__ : List[str] = True def lowerCamelCase ( self : str): """simple docstring""" super().setUp() UpperCAmelCase_ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case)))) UpperCAmelCase_ = Path(self.tmpdirname) save_json(_snake_case , save_dir / VOCAB_FILES_NAMES['''vocab_file''']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES['''spm_file''']) UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def lowerCamelCase ( self : str , **_snake_case : Union[str, Any]): """simple docstring""" return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_snake_case) def lowerCamelCase ( self : Optional[int] , _snake_case : List[str]): """simple docstring""" return ( "This is a test", "This is a test", ) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = '''</s>''' UpperCAmelCase_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = list(tokenizer.get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''</s>''') self.assertEqual(vocab_keys[1] , '''<unk>''') self.assertEqual(vocab_keys[-1] , '''<s>''') self.assertEqual(len(_snake_case) , tokenizer.vocab_size + len(tokenizer.get_added_vocab())) @unittest.skip('''Skip this test while all models are still to be uploaded.''') def lowerCamelCase ( self : Optional[int]): """simple docstring""" pass def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = tokenizer.tokenize('''This is a test''') self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual( tokenizer.convert_tokens_to_ids(_snake_case) , [2, 3, 4, 5, 6] , ) UpperCAmelCase_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6]) self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case) self.assertEqual(_snake_case , '''This is a test''') @slow def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 
207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_snake_case , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , ) @require_torch @require_sentencepiece @require_tokenizers class __snake_case ( unittest.TestCase ): UpperCAmelCase__ : Dict = '''facebook/m2m100_418M''' UpperCAmelCase__ : Dict = [ '''In my opinion, there are two levels of response from the French government.''', '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''', ] UpperCAmelCase__ : Dict = [ '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', ] # fmt: off UpperCAmelCase__ : Any = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2] @classmethod def lowerCamelCase ( cls : Optional[Any]): """simple docstring""" UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''') UpperCAmelCase_ = 1 return cls def lowerCamelCase ( self : List[Any]): 
"""simple docstring""" self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006) self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022) self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076) self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.tokenizer.get_vocab() self.assertEqual(len(_snake_case) , self.tokenizer.vocab_size) self.assertEqual(vocab['''<unk>'''] , 3) self.assertIn(self.tokenizer.get_lang_token('''en''') , _snake_case) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''en''' UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , _snake_case) def lowerCamelCase ( self : Any): """simple docstring""" self.assertIn(_snake_case , self.tokenizer.all_special_ids) # fmt: off UpperCAmelCase_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: on UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case) UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case) self.assertEqual(_snake_case , _snake_case) self.assertNotIn(self.tokenizer.eos_token , _snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(_snake_case) UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(_snake_case) self.assertDictEqual(new_tok.lang_token_to_id , _snake_case) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = '''en''' UpperCAmelCase_ = '''fr''' UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''') UpperCAmelCase_ = shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id) for k in batch: UpperCAmelCase_ = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''mr''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) UpperCAmelCase_ = '''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) @require_torch def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) UpperCAmelCase_ = '''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')]) 
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''') self.assertEqual( nested_simplify(_snake_case) , { # en_XX, A, test, EOS '''input_ids''': [[128022, 58, 4183, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 128006, } , )
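# ---------------------------------------------------------------------------
# Usage sketch for the translation API exercised above (it downloads the real
# "facebook/m2m100_418M" checkpoint, so it is left as a comment):
#
#   tokenizer = MaMaaaTokenizer.from_pretrained(
#       "facebook/m2m100_418M", src_lang="en", tgt_lang="fr"
#   )
#   batch = tokenizer("A test", return_tensors="pt")
#   # input_ids start with the "en" language code and end with </s> (id 2),
#   # matching the expected_src_tokens layout checked in the tests above.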
7
0
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: return the longest palindromic substring of
    input_string in linear time.
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_input_string for range(0, length - 1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"

    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending
    # palindromic substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_input_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
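# Quick worked check for palindromic_string above: "aba" becomes "a|b|a"
# after the separators are inserted; the centre at index 2 expands to the
# whole augmented string (length 2 * k - 1 = 5 with k = 3), and stripping
# the "|" characters recovers the palindrome.
assert palindromic_string("aba") == "aba"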
357
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING snake_case_ : List[str] = logging.get_logger(__name__) @add_end_docstrings(a ) class __snake_case ( a ): def __init__( self : Tuple , *_snake_case : List[Any] , **_snake_case : Optional[Any]): """simple docstring""" super().__init__(*_snake_case , **_snake_case) self.check_model_type(_snake_case) def lowerCamelCase ( self : List[str] , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : str=None , **_snake_case : Optional[Any]): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = {}, {} if padding is not None: UpperCAmelCase_ = padding if truncation is not None: UpperCAmelCase_ = truncation if top_k is not None: UpperCAmelCase_ = top_k return preprocess_params, {}, postprocess_params def __call__( self : List[Any] , _snake_case : Union["Image.Image", str] , _snake_case : str = None , **_snake_case : str): """simple docstring""" if isinstance(_snake_case , (Image.Image, str)) and isinstance(_snake_case , _snake_case): UpperCAmelCase_ = {'''image''': image, '''question''': question} else: UpperCAmelCase_ = image UpperCAmelCase_ = super().__call__(_snake_case , **_snake_case) return results def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : Optional[int]=False , _snake_case : int=False): """simple docstring""" UpperCAmelCase_ = load_image(inputs['''image''']) UpperCAmelCase_ = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=_snake_case , truncation=_snake_case) UpperCAmelCase_ = self.image_processor(images=_snake_case , return_tensors=self.framework) model_inputs.update(_snake_case) return model_inputs def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]): """simple docstring""" UpperCAmelCase_ = self.model(**_snake_case) return model_outputs def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : List[str]=5): """simple docstring""" if top_k > self.model.config.num_labels: UpperCAmelCase_ = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase_ = model_outputs.logits.sigmoid()[0] UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(_snake_case) else: raise ValueError(F"""Unsupported framework: {self.framework}""") UpperCAmelCase_ = scores.tolist() UpperCAmelCase_ = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_snake_case , _snake_case)]
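# ---------------------------------------------------------------------------
# Usage sketch mirroring the pipeline tests elsewhere in this repo (the
# checkpoint is fetched from the Hub and the image path is a test fixture,
# so it is left as a comment):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#       question="How many cats are there?", top_k=2)
#   # -> [{"score": ..., "answer": "2"}, {"score": ..., "answer": "1"}]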
7
0
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __snake_case : def __init__( self : List[Any] , _snake_case : List[Any] , _snake_case : Optional[int]=2 , _snake_case : str=8 , _snake_case : int=True , _snake_case : List[Any]=True , _snake_case : Dict=True , _snake_case : int=True , _snake_case : Any=99 , _snake_case : Union[str, Any]=16 , _snake_case : Any=5 , _snake_case : Union[str, Any]=2 , _snake_case : List[Any]=36 , _snake_case : Union[str, Any]="gelu" , _snake_case : List[Any]=0.0 , _snake_case : Union[str, Any]=0.0 , _snake_case : Union[str, Any]=512 , _snake_case : Optional[Any]=16 , _snake_case : int=2 , _snake_case : List[Any]=0.0_2 , _snake_case : List[str]=3 , _snake_case : Any=4 , _snake_case : Optional[Any]=None , ): """simple docstring""" UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_input_mask UpperCAmelCase_ = use_token_type_ids UpperCAmelCase_ = use_labels UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = type_vocab_size UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = num_labels UpperCAmelCase_ = num_choices UpperCAmelCase_ = scope def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) UpperCAmelCase_ = None if self.use_input_mask: UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length]) UpperCAmelCase_ = None if self.use_token_type_ids: UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices) UpperCAmelCase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self : List[str]): """simple docstring""" return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , ) def lowerCamelCase ( self : Optional[int]): """simple docstring""" 
UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = 300 return config def lowerCamelCase ( self : Dict): """simple docstring""" ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = self.prepare_config_and_inputs() UpperCAmelCase_ = True UpperCAmelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCamelCase ( self : List[Any] , _snake_case : Union[str, Any] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : List[str] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : int): """simple docstring""" UpperCAmelCase_ = MraModel(config=_snake_case) model.to(_snake_case) model.eval() UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case) UpperCAmelCase_ = model(_snake_case , token_type_ids=_snake_case) UpperCAmelCase_ = model(_snake_case) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowerCamelCase ( self : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : int , _snake_case : List[str] , _snake_case : str , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : Tuple , _snake_case : Dict , ): """simple docstring""" UpperCAmelCase_ = True UpperCAmelCase_ = MraModel(_snake_case) model.to(_snake_case) model.eval() UpperCAmelCase_ = model( _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , ) UpperCAmelCase_ = model( _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , encoder_hidden_states=_snake_case , ) UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowerCamelCase ( self : List[Any] , _snake_case : Optional[int] , _snake_case : int , _snake_case : str , _snake_case : int , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : int): """simple docstring""" UpperCAmelCase_ = MraForMaskedLM(config=_snake_case) model.to(_snake_case) model.eval() UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowerCamelCase ( self : List[Any] , _snake_case : Tuple , _snake_case : Tuple , _snake_case : Tuple , _snake_case : str , _snake_case : List[str] , _snake_case : int , _snake_case : Any): """simple docstring""" UpperCAmelCase_ = MraForQuestionAnswering(config=_snake_case) model.to(_snake_case) model.eval() UpperCAmelCase_ = model( _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , start_positions=_snake_case , end_positions=_snake_case , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def lowerCamelCase ( self : List[str] , _snake_case : str , _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : int , _snake_case : 
List[Any] , _snake_case : int): """simple docstring""" UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = MraForSequenceClassification(_snake_case) model.to(_snake_case) model.eval() UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def lowerCamelCase ( self : Dict , _snake_case : Dict , _snake_case : int , _snake_case : List[Any] , _snake_case : int , _snake_case : List[Any] , _snake_case : Any , _snake_case : int): """simple docstring""" UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = MraForTokenClassification(config=_snake_case) model.to(_snake_case) model.eval() UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def lowerCamelCase ( self : str , _snake_case : Tuple , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : str , _snake_case : Any , _snake_case : Any , _snake_case : Tuple): """simple docstring""" UpperCAmelCase_ = self.num_choices UpperCAmelCase_ = MraForMultipleChoice(config=_snake_case) model.to(_snake_case) model.eval() UpperCAmelCase_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() UpperCAmelCase_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() UpperCAmelCase_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() UpperCAmelCase_ = model( _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : List[Any] = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) UpperCAmelCase__ : int = False UpperCAmelCase__ : str = False UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Dict = False UpperCAmelCase__ : Any = () def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = MraModelTester(self) UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , hidden_size=37) def lowerCamelCase ( self : Optional[int]): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ = type self.model_tester.create_and_check_model(*_snake_case) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_lm(*_snake_case) def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_snake_case) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_snake_case) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_snake_case) @slow def lowerCamelCase ( self : List[Any]): """simple docstring""" for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ = MraModel.from_pretrained(_snake_case) self.assertIsNotNone(_snake_case) @unittest.skip(reason='''MRA does not output attentions''') def lowerCamelCase ( self : int): """simple docstring""" return @require_torch class __snake_case ( unittest.TestCase ): @slow def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = MraModel.from_pretrained('''uw-madison/mra-base-512-4''') UpperCAmelCase_ = torch.arange(256).unsqueeze(0) with torch.no_grad(): UpperCAmelCase_ = model(_snake_case)[0] UpperCAmelCase_ = torch.Size((1, 256, 768)) self.assertEqual(output.shape , _snake_case) UpperCAmelCase_ = torch.tensor( [[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , _snake_case , atol=1e-4)) @slow def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''') UpperCAmelCase_ = torch.arange(256).unsqueeze(0) with torch.no_grad(): UpperCAmelCase_ = model(_snake_case)[0] UpperCAmelCase_ = 50265 UpperCAmelCase_ = torch.Size((1, 256, vocab_size)) self.assertEqual(output.shape , _snake_case) UpperCAmelCase_ = torch.tensor( [[[9.2_5_9_5, -3.6_0_3_8, 11.8819], [9.3_8_6_9, -3.2_6_9_3, 11.0956], [11.8524, -3.4_9_3_8, 13.1210]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , _snake_case , atol=1e-4)) @slow def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''') UpperCAmelCase_ = torch.arange(4096).unsqueeze(0) with torch.no_grad(): UpperCAmelCase_ = model(_snake_case)[0] UpperCAmelCase_ = 50265 UpperCAmelCase_ = torch.Size((1, 4096, vocab_size)) self.assertEqual(output.shape , _snake_case) UpperCAmelCase_ = torch.tensor( [[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , _snake_case , atol=1e-4))
358
import sys


def matrix_chain_order(array):
    """
    Dynamic programming solution to matrix-chain multiplication: matrix[a][b]
    holds the minimum number of scalar multiplications needed to compute the
    product A_a ... A_b, and sol[a][b] the split point achieving it.
    """
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Print the optimal parenthesization recorded in optimal_solution."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
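# Worked check for matrix_chain_order above. Two matrices of shape 10x20 and
# 20x30 need exactly 10 * 20 * 30 = 6000 scalar multiplications:
_matrix, _sol = matrix_chain_order([10, 20, 30])
assert _matrix[1][2] == 6000
# For the classic instance used in main(), [30, 35, 15, 5, 10, 20, 25], the
# optimum is 15125 multiplications via ((A1(A2A3))((A4A5)A6)).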
7
0
"""simple docstring""" import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class __snake_case ( unittest.TestCase , a ): def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = load_tool('''text-to-speech''') self.tool.setup() def lowerCamelCase ( self : int): """simple docstring""" torch.manual_seed(0) UpperCAmelCase_ = self.tool('''hey''') UpperCAmelCase_ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , )) def lowerCamelCase ( self : Any): """simple docstring""" torch.manual_seed(0) UpperCAmelCase_ = self.tool('''hey''') UpperCAmelCase_ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
359
import inspect
import os
import re

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import


# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}


# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)


def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` is used in one of the strings in `source_strings`."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed


def check_config_attributes_being_used(config_class):
    """Return a sorted list of `config_class` __init__ arguments that never appear in the modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)


def check_config_attributes():
    """Check every configuration class in `CONFIG_MAPPING` and raise on unused attributes."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"

        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
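# A minimal sketch of driving the checker above programmatically (assuming a repo
# checkout where the module is importable; `check_config_attributes()` raises a
# ValueError naming each config class whose `__init__` arguments never appear in
# the corresponding `modeling_*.py` files):
#
#     from check_config_attributes import check_config_attributes
#
#     try:
#         check_config_attributes()
#     except ValueError as err:
#         print(err)  # e.g. "SomeConfig: ['unused_attribute']"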
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
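# An illustrative invocation (script filename assumed from the transformers
# conversion-script naming convention; the checkpoint/config paths are
# hypothetical placeholders for a downloaded TF XLNet checkpoint):
#
#     python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#         --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#         --pytorch_dump_folder_path ./xlnet-base-cased-pt \
#         --finetuning_task sts-b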
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with a truncated Maclaurin series of `accuracy` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    # reduce theta modulo 2*pi so the truncated series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with a truncated Maclaurin series of `accuracy` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
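# A minimal sanity check (illustrative, not part of the original module): with the
# argument reduced modulo 2*pi as done above, the 30-term series
#     sin(x) = sum_{r>=0} (-1)^r x^(2r+1) / (2r+1)!
#     cos(x) = sum_{r>=0} (-1)^r x^(2r)   / (2r)!
# should track the stdlib implementations to well below 1e-9:
#
#     import math
#     for x in (0.5, 3.0, -7.25, 100.0):
#         assert abs(maclaurin_sin(x) - math.sin(x)) < 1e-9
#         assert abs(maclaurin_cos(x) - math.cos(x)) < 1e-9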
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402


config_common_kwargs = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)


class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))

        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
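# Illustrative invocation (the script filename is assumed from the transformers
# conversion-script convention; weights are pulled from the mmsegmentation URLs
# in `model_name_to_url` above):
#
#     python convert_swin_upernet_to_pytorch.py \
#         --model_name upernet-swin-tiny \
#         --pytorch_dump_folder_path ./upernet-swin-tiny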
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
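# A minimal usage sketch (downloads the "vinai/bartpho-syllable" files referenced
# in the maps above; the example sentence follows the BARTpho documentation):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
#     print(tokenizer.decode(ids))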
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
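# A minimal usage sketch: the wrapper above pairs a question-encoder tokenizer
# (DPR) with a generator tokenizer (BART) and routes `decode`/`batch_decode` to
# the generator. Assuming the "facebook/rag-token-base" checkpoint:
#
#     from transformers import RagTokenizer
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#     inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")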
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )


class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        # The position ids should be masked with the embedding object's padding index. Therefore, the
        # first available non-padding position index is EsmEmbeddings.padding_idx + 1
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
"""simple docstring""" import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed snake_case_ : int = "true" def A (__A : Tuple , __A : Optional[Any]=82 , __A : Optional[Any]=16 ) -> int: """simple docstring""" set_seed(42 ) UpperCAmelCase_ = RegressionModel() UpperCAmelCase_ = deepcopy(__A ) UpperCAmelCase_ = RegressionDataset(length=__A ) UpperCAmelCase_ = DataLoader(__A , batch_size=__A ) model.to(accelerator.device ) UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(__A , __A ) return model, ddp_model, dataloader def A (__A : Accelerator , __A : Dict=False ) -> List[str]: """simple docstring""" UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' ) UpperCAmelCase_ = load_dataset('''glue''' , '''mrpc''' , split='''validation''' ) def tokenize_function(__A : List[Any] ): UpperCAmelCase_ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__A , max_length=__A ) return outputs with accelerator.main_process_first(): UpperCAmelCase_ = dataset.map( __A , batched=__A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) UpperCAmelCase_ = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__A : int ): if use_longest: return tokenizer.pad(__A , padding='''longest''' , return_tensors='''pt''' ) return tokenizer.pad(__A , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return DataLoader(__A , shuffle=__A , collate_fn=__A , batch_size=16 ) def A (__A : List[str] , __A : Tuple ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ = Accelerator(dispatch_batches=__A , split_batches=__A ) UpperCAmelCase_ = get_dataloader(__A , not dispatch_batches ) UpperCAmelCase_ = AutoModelForSequenceClassification.from_pretrained( '''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__A ) UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(__A , __A ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def A (__A : Any , __A : Optional[Any] , __A : str ) -> int: """simple docstring""" UpperCAmelCase_ = [] for batch in dataloader: UpperCAmelCase_ , UpperCAmelCase_ = batch.values() with torch.no_grad(): UpperCAmelCase_ = model(__A ) UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) UpperCAmelCase_ , UpperCAmelCase_ = [], [] for logit, targ in logits_and_targets: logits.append(__A ) targs.append(__A ) UpperCAmelCase_ , UpperCAmelCase_ = torch.cat(__A ), torch.cat(__A ) return logits, targs def A (__A : Accelerator , __A : Tuple=82 , __A : Tuple=False , __A : Tuple=False , __A : Optional[Any]=16 ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_basic_setup(__A , __A , __A ) UpperCAmelCase_ , UpperCAmelCase_ = generate_predictions(__A , __A , __A ) assert ( len(__A ) == num_samples ), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__A )}""" def A (__A : bool = False , __A : bool = False ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ = evaluate.load('''glue''' , '''mrpc''' ) 
UpperCAmelCase_ , UpperCAmelCase_ = get_mrpc_setup(__A , __A ) # First do baseline UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = setup['''no'''] model.to(__A ) model.eval() for batch in dataloader: batch.to(__A ) with torch.inference_mode(): UpperCAmelCase_ = model(**__A ) UpperCAmelCase_ = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=__A , references=batch['''labels'''] ) UpperCAmelCase_ = metric.compute() # Then do distributed UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = setup['''ddp'''] model.eval() for batch in dataloader: with torch.inference_mode(): UpperCAmelCase_ = model(**__A ) UpperCAmelCase_ = outputs.logits.argmax(dim=-1 ) UpperCAmelCase_ = batch['''labels'''] UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=__A , references=__A ) UpperCAmelCase_ = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n""" def A () -> Any: """simple docstring""" UpperCAmelCase_ = Accelerator(split_batches=__A , dispatch_batches=__A ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('''**Testing gather_for_metrics**''' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" ) test_mrpc(__A , __A ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test torch metrics**''' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: UpperCAmelCase_ = Accelerator(split_batches=__A , dispatch_batches=__A ) if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" ) test_torch_metrics(__A , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test last batch is not dropped when perfectly divisible**''' ) UpperCAmelCase_ = Accelerator() test_torch_metrics(__A , 512 ) accelerator.state._reset_state() def A (__A : str ) -> List[Any]: """simple docstring""" main() if __name__ == "__main__": main()
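The evaluation loops above hinge on Accelerator.gather_for_metrics, which collects per-process results and drops the samples duplicated to pad the final batch. A minimal self-contained sketch of that pattern (the toy linear model and random data are hypothetical; the accelerate calls are the ones used above):

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
dataset = TensorDataset(torch.randn(10, 4), torch.randint(0, 2, (10,)))
dataloader = DataLoader(dataset, batch_size=4)
model, dataloader = accelerator.prepare(model, dataloader)

logits, targets = [], []
for xb, yb in dataloader:
    with torch.no_grad():
        out = model(xb)
    # Drops duplicates added for padding, so metrics see exactly len(dataset) samples.
    out, yb = accelerator.gather_for_metrics((out, yb))
    logits.append(out)
    targets.append(yb)
logits, targets = torch.cat(logits), torch.cat(targets)
assert len(logits) == len(dataset)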
365
from maths.prime_factors import prime_factors def A (__A : int ) -> int: """simple docstring""" if not isinstance(__A , int ): UpperCAmelCase_ = F"""Input value of [number={__A}] must be an integer""" raise TypeError(UpperCAmelCase_ ) if __A < 1: raise ValueError('''Input must be a positive integer''' ) return -1 if len(prime_factors(__A ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
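The function above computes the parity of the number of prime factors counted with multiplicity, i.e. the Liouville function λ(n) = (−1)^Ω(n). A standalone sketch with an inlined trial-division factorizer (the original imports prime_factors from maths.prime_factors, assumed here to return factors with multiplicity; the name liouville_lambda is illustrative):

def prime_factors(n: int) -> list:
    # Trial division; returns prime factors with multiplicity.
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors

def liouville_lambda(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1

assert liouville_lambda(12) == -1  # 12 = 2 * 2 * 3 -> three factors, odd
assert liouville_lambda(10) == 1   # 10 = 2 * 5 -> two factors, even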
7
0
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() snake_case_ : int = logging.get_logger(__name__) def A (__A : List[Any] ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: UpperCAmelCase_ = [144, 192, 240] UpperCAmelCase_ = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: UpperCAmelCase_ = [96, 120, 144] UpperCAmelCase_ = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: UpperCAmelCase_ = [64, 80, 96] UpperCAmelCase_ = [16, 16, 24, 48, 64, 80, 320] UpperCAmelCase_ = 0.05 UpperCAmelCase_ = 2.0 if mobilevit_name.startswith('''deeplabv3_''' ): UpperCAmelCase_ = 512 UpperCAmelCase_ = 16 UpperCAmelCase_ = 21 UpperCAmelCase_ = '''pascal-voc-id2label.json''' else: UpperCAmelCase_ = 1000 UpperCAmelCase_ = '''imagenet-1k-id2label.json''' UpperCAmelCase_ = '''huggingface/label-files''' UpperCAmelCase_ = json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase_ = {int(__A ): v for k, v in idalabel.items()} UpperCAmelCase_ = idalabel UpperCAmelCase_ = {v: k for k, v in idalabel.items()} return config def A (__A : Any , __A : List[Any]=False ) -> Union[str, Any]: """simple docstring""" for i in range(1 , 6 ): if F"""layer_{i}.""" in name: UpperCAmelCase_ = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: UpperCAmelCase_ = name.replace('''conv_1.''' , '''conv_stem.''' ) if ".block." in name: UpperCAmelCase_ = name.replace('''.block.''' , '''.''' ) if "exp_1x1" in name: UpperCAmelCase_ = name.replace('''exp_1x1''' , '''expand_1x1''' ) if "red_1x1" in name: UpperCAmelCase_ = name.replace('''red_1x1''' , '''reduce_1x1''' ) if ".local_rep.conv_3x3." in name: UpperCAmelCase_ = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' ) if ".local_rep.conv_1x1." in name: UpperCAmelCase_ = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' ) if ".norm." in name: UpperCAmelCase_ = name.replace('''.norm.''' , '''.normalization.''' ) if ".conv." in name: UpperCAmelCase_ = name.replace('''.conv.''' , '''.convolution.''' ) if ".conv_proj." in name: UpperCAmelCase_ = name.replace('''.conv_proj.''' , '''.conv_projection.''' ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: UpperCAmelCase_ = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: UpperCAmelCase_ = name.replace(F""".{i}.{j}.""" , F""".{i}.""" ) if "expand_1x1" in name: UpperCAmelCase_ = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' ) if "conv_3x3" in name: UpperCAmelCase_ = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' ) if "reduce_1x1" in name: UpperCAmelCase_ = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' ) for i in range(2 , 5 ): if F""".global_rep.{i}.weight""" in name: UpperCAmelCase_ = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' ) if F""".global_rep.{i}.bias""" in name: UpperCAmelCase_ = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' ) if ".global_rep." 
in name: UpperCAmelCase_ = name.replace('''.global_rep.''' , '''.transformer.''' ) if ".pre_norm_mha.0." in name: UpperCAmelCase_ = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' ) if ".pre_norm_mha.1.out_proj." in name: UpperCAmelCase_ = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' ) if ".pre_norm_ffn.0." in name: UpperCAmelCase_ = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' ) if ".pre_norm_ffn.1." in name: UpperCAmelCase_ = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' ) if ".pre_norm_ffn.4." in name: UpperCAmelCase_ = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' ) if ".transformer." in name: UpperCAmelCase_ = name.replace('''.transformer.''' , '''.transformer.layer.''' ) if ".aspp_layer." in name: UpperCAmelCase_ = name.replace('''.aspp_layer.''' , '''.''' ) if ".aspp_pool." in name: UpperCAmelCase_ = name.replace('''.aspp_pool.''' , '''.''' ) if "seg_head." in name: UpperCAmelCase_ = name.replace('''seg_head.''' , '''segmentation_head.''' ) if "segmentation_head.classifier.classifier." in name: UpperCAmelCase_ = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' ) if "classifier.fc." in name: UpperCAmelCase_ = name.replace('''classifier.fc.''' , '''classifier.''' ) elif (not base_model) and ("segmentation_head." not in name): UpperCAmelCase_ = '''mobilevit.''' + name return name def A (__A : Optional[int] , __A : Optional[int] , __A : Optional[Any]=False ) -> Optional[Any]: """simple docstring""" if base_model: UpperCAmelCase_ = '''''' else: UpperCAmelCase_ = '''mobilevit.''' for key in orig_state_dict.copy().keys(): UpperCAmelCase_ = orig_state_dict.pop(__A ) if key[:8] == "encoder.": UpperCAmelCase_ = key[8:] if "qkv" in key: UpperCAmelCase_ = key.split('''.''' ) UpperCAmelCase_ = int(key_split[0][6:] ) - 1 UpperCAmelCase_ = int(key_split[3] ) UpperCAmelCase_ = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" ) UpperCAmelCase_ = layer.transformer.layer[transformer_num].attention.attention.all_head_size UpperCAmelCase_ = ( F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.""" ) if "weight" in key: UpperCAmelCase_ = val[:dim, :] UpperCAmelCase_ = val[dim : dim * 2, :] UpperCAmelCase_ = val[-dim:, :] else: UpperCAmelCase_ = val[:dim] UpperCAmelCase_ = val[dim : dim * 2] UpperCAmelCase_ = val[-dim:] else: UpperCAmelCase_ = val return orig_state_dict def A () -> int: """simple docstring""" UpperCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase_ = Image.open(requests.get(__A , stream=__A ).raw ) return im @torch.no_grad() def A (__A : Any , __A : List[str] , __A : Optional[Any] , __A : int=False ) -> str: """simple docstring""" UpperCAmelCase_ = get_mobilevit_config(__A ) # load original state_dict UpperCAmelCase_ = torch.load(__A , map_location='''cpu''' ) # load 🤗 model if mobilevit_name.startswith('''deeplabv3_''' ): UpperCAmelCase_ = MobileViTForSemanticSegmentation(__A ).eval() else: UpperCAmelCase_ = MobileViTForImageClassification(__A ).eval() UpperCAmelCase_ = convert_state_dict(__A , __A ) model.load_state_dict(__A ) # Check outputs on an image, prepared by MobileViTImageProcessor UpperCAmelCase_ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) UpperCAmelCase_ = image_processor(images=prepare_img() , return_tensors='''pt''' ) UpperCAmelCase_ = model(**__A ) UpperCAmelCase_ = outputs.logits if 
mobilevit_name.startswith('''deeplabv3_''' ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": UpperCAmelCase_ = torch.tensor( [ [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]], [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]], [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": UpperCAmelCase_ = torch.tensor( [ [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]], [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]], [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": UpperCAmelCase_ = torch.tensor( [ [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]], [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]], [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]], ] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3, :3, :3] , __A , atol=1E-4 ) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": UpperCAmelCase_ = torch.tensor([-0.9_866, 0.2_392, -1.1_241] ) elif mobilevit_name == "mobilevit_xs": UpperCAmelCase_ = torch.tensor([-2.4_761, -0.9_399, -1.9_587] ) elif mobilevit_name == "mobilevit_xxs": UpperCAmelCase_ = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3] , __A , atol=1E-4 ) Path(__A ).mkdir(exist_ok=__A ) print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__A ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__A ) if push_to_hub: UpperCAmelCase_ = { '''mobilevit_s''': '''mobilevit-small''', '''mobilevit_xs''': '''mobilevit-x-small''', '''mobilevit_xxs''': '''mobilevit-xx-small''', '''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''', '''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''', '''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''', } print('''Pushing to the hub...''' ) UpperCAmelCase_ = model_mapping[mobilevit_name] image_processor.push_to_hub(__A , organization='''apple''' ) model.push_to_hub(__A , organization='''apple''' ) if __name__ == "__main__": snake_case_ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--mobilevit_name", default="mobilevit_s", type=str, help=( "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs'," " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'." ), ) parser.add_argument( "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) snake_case_ : Union[str, Any] = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
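A hedged invocation sketch for the converter; the script filename is an assumption, but the flags are exactly the ones defined by the argument parser above:

python convert_mobilevit_checkpoint.py \
    --mobilevit_name mobilevit_s \
    --checkpoint_path ./checkpoints/mobilevit_s.pt \
    --pytorch_dump_folder_path ./mobilevit-small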
366
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any]): """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']): UpperCAmelCase_ = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_snake_case) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''') def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) # set architectures equal to `None` UpperCAmelCase_ = None UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Dict): """simple 
docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tinier_bart''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tinier_bart''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ = PyTorchBenchmarkArguments( 
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) benchmark.run() self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists()) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_snake_case : Tuple): self.assertTrue(hasattr(_snake_case , '''sequential''')) self.assertTrue(hasattr(_snake_case , '''cumulative''')) self.assertTrue(hasattr(_snake_case , '''current''')) self.assertTrue(hasattr(_snake_case , '''total''')) with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() _check_summary_is_not_empty(result.inference_summary) _check_summary_is_not_empty(result.train_summary) self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
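Every test above drives the same two-step API: build PyTorchBenchmarkArguments, then run PyTorchBenchmark. A condensed usage sketch with the argument values the tests pass (model id taken from the tests; only a sketch of the calls shown above):

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
benchmark = PyTorchBenchmark(args)
results = benchmark.run()
print(results.time_inference_result)
print(results.memory_inference_result)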
7
0
from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING snake_case_ : int = logging.get_logger(__name__) @add_end_docstrings(a ) class __snake_case ( a ): def __init__( self : int , *_snake_case : Optional[int] , **_snake_case : Any): """simple docstring""" super().__init__(*_snake_case , **_snake_case) requires_backends(self , '''decord''') self.check_model_type(_snake_case) def lowerCamelCase ( self : Dict , _snake_case : Optional[int]=None , _snake_case : Optional[int]=None , _snake_case : Any=None): """simple docstring""" UpperCAmelCase_ = {} if frame_sampling_rate is not None: UpperCAmelCase_ = frame_sampling_rate if num_frames is not None: UpperCAmelCase_ = num_frames UpperCAmelCase_ = {} if top_k is not None: UpperCAmelCase_ = top_k return preprocess_params, {}, postprocess_params def __call__( self : List[Any] , _snake_case : Union[str, List[str]] , **_snake_case : Any): """simple docstring""" return super().__call__(_snake_case , **_snake_case) def lowerCamelCase ( self : int , _snake_case : List[str] , _snake_case : Dict=None , _snake_case : Tuple=1): """simple docstring""" if num_frames is None: UpperCAmelCase_ = self.model.config.num_frames if video.startswith('''http://''') or video.startswith('''https://'''): UpperCAmelCase_ = BytesIO(requests.get(_snake_case).content) UpperCAmelCase_ = VideoReader(_snake_case) videoreader.seek(0) UpperCAmelCase_ = 0 UpperCAmelCase_ = num_frames * frame_sampling_rate - 1 UpperCAmelCase_ = np.linspace(_snake_case , _snake_case , num=_snake_case , dtype=np.intaa) UpperCAmelCase_ = videoreader.get_batch(_snake_case).asnumpy() UpperCAmelCase_ = list(_snake_case) UpperCAmelCase_ = self.image_processor(_snake_case , return_tensors=self.framework) return model_inputs def lowerCamelCase ( self : List[Any] , _snake_case : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.model(**_snake_case) return model_outputs def lowerCamelCase ( self : Any , _snake_case : Union[str, Any] , _snake_case : int=5): """simple docstring""" if top_k > self.model.config.num_labels: UpperCAmelCase_ = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase_ = model_outputs.logits.softmax(-1)[0] UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(_snake_case) else: raise ValueError(F"""Unsupported framework: {self.framework}""") UpperCAmelCase_ = scores.tolist() UpperCAmelCase_ = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_snake_case , _snake_case)]
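A hedged usage sketch for the pipeline class above through the pipeline factory. The task string, the checkpoint name, and the video path are assumptions, and decord must be installed; top_k and frame_sampling_rate map to the preprocess/postprocess parameters handled above:

from transformers import pipeline

# Checkpoint and video path are placeholders, not taken from the code above.
classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
predictions = classifier("path/to/video.mp4", top_k=5, frame_sampling_rate=4)
print(predictions)  # [{"score": ..., "label": ...}, ...]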
367
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def A (__A : BertModel , __A : str , __A : str ) -> int: """simple docstring""" UpperCAmelCase_ = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''') UpperCAmelCase_ = ( ('''layer.''', '''layer_'''), ('''word_embeddings.weight''', '''word_embeddings'''), ('''position_embeddings.weight''', '''position_embeddings'''), ('''token_type_embeddings.weight''', '''token_type_embeddings'''), ('''.''', '''/'''), ('''LayerNorm/weight''', '''LayerNorm/gamma'''), ('''LayerNorm/bias''', '''LayerNorm/beta'''), ('''weight''', '''kernel'''), ) if not os.path.isdir(__A ): os.makedirs(__A ) UpperCAmelCase_ = model.state_dict() def to_tf_var_name(__A : str ): for patt, repl in iter(__A ): UpperCAmelCase_ = name.replace(__A , __A ) return F"""bert/{name}""" def create_tf_var(__A : np.ndarray , __A : str , __A : tf.Session ): UpperCAmelCase_ = tf.dtypes.as_dtype(tensor.dtype ) UpperCAmelCase_ = tf.get_variable(dtype=__A , shape=tensor.shape , name=__A , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__A ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: UpperCAmelCase_ = to_tf_var_name(__A ) UpperCAmelCase_ = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): UpperCAmelCase_ = torch_tensor.T UpperCAmelCase_ = create_tf_var(tensor=__A , name=__A , session=__A ) tf.keras.backend.set_value(__A , __A ) UpperCAmelCase_ = session.run(__A ) print(F"""Successfully created {tf_name}: {np.allclose(__A , __A )}""" ) UpperCAmelCase_ = tf.train.Saver(tf.trainable_variables() ) saver.save(__A , os.path.join(__A , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) ) def A (__A : Any=None ) -> str: """simple docstring""" UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--model_name''' , type=__A , required=__A , help='''model name e.g. bert-base-uncased''' ) parser.add_argument( '''--cache_dir''' , type=__A , default=__A , required=__A , help='''Directory containing pytorch model''' ) parser.add_argument('''--pytorch_model_path''' , type=__A , required=__A , help='''/path/to/<pytorch-model-name>.bin''' ) parser.add_argument('''--tf_cache_dir''' , type=__A , required=__A , help='''Directory in which to save tensorflow model''' ) UpperCAmelCase_ = parser.parse_args(__A ) UpperCAmelCase_ = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__A , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
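A hedged invocation sketch; the script filename is assumed, while the flags are the ones defined by the parser in the snippet (--cache_dir is optional):

python convert_bert_pytorch_checkpoint_to_tf.py \
    --model_name bert-base-uncased \
    --pytorch_model_path ./bert/pytorch_model.bin \
    --tf_cache_dir ./tf_checkpoints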
7
0
from ...configuration_utils import PretrainedConfig snake_case_ : Optional[Any] = { "google/tapas-base-finetuned-sqa": ( "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json" ), "google/tapas-base-finetuned-wtq": ( "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json" ), "google/tapas-base-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json" ), "google/tapas-base-finetuned-tabfact": ( "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json" ), } class __snake_case ( a ): UpperCAmelCase__ : List[str] = '''tapas''' def __init__( self : Any , _snake_case : Union[str, Any]=30522 , _snake_case : Dict=768 , _snake_case : List[Any]=12 , _snake_case : Union[str, Any]=12 , _snake_case : List[str]=3072 , _snake_case : Dict="gelu" , _snake_case : List[Any]=0.1 , _snake_case : int=0.1 , _snake_case : Optional[int]=1024 , _snake_case : Dict=[3, 256, 256, 2, 256, 256, 10] , _snake_case : Tuple=0.0_2 , _snake_case : List[str]=1e-12 , _snake_case : int=0 , _snake_case : Any=10.0 , _snake_case : Tuple=0 , _snake_case : Tuple=1.0 , _snake_case : List[Any]=None , _snake_case : List[str]=1.0 , _snake_case : List[str]=False , _snake_case : Optional[Any]=None , _snake_case : Optional[int]=1.0 , _snake_case : List[Any]=1.0 , _snake_case : List[str]=False , _snake_case : List[Any]=False , _snake_case : str="ratio" , _snake_case : Optional[Any]=None , _snake_case : List[str]=None , _snake_case : Union[str, Any]=64 , _snake_case : str=32 , _snake_case : str=False , _snake_case : Dict=True , _snake_case : int=False , _snake_case : Any=False , _snake_case : List[Any]=True , _snake_case : str=False , _snake_case : str=None , _snake_case : Tuple=None , **_snake_case : str , ): """simple docstring""" super().__init__(pad_token_id=_snake_case , **_snake_case) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = hidden_act UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = type_vocab_sizes UpperCAmelCase_ = initializer_range UpperCAmelCase_ = layer_norm_eps # Fine-tuning task hyperparameters UpperCAmelCase_ = positive_label_weight UpperCAmelCase_ = num_aggregation_labels UpperCAmelCase_ = aggregation_loss_weight UpperCAmelCase_ = use_answer_as_supervision UpperCAmelCase_ = answer_loss_importance UpperCAmelCase_ = use_normalized_answer_loss UpperCAmelCase_ = huber_loss_delta UpperCAmelCase_ = temperature UpperCAmelCase_ = aggregation_temperature UpperCAmelCase_ = use_gumbel_for_cells UpperCAmelCase_ = use_gumbel_for_aggregation UpperCAmelCase_ = average_approximation_function UpperCAmelCase_ = cell_selection_preference UpperCAmelCase_ = answer_loss_cutoff UpperCAmelCase_ = max_num_rows UpperCAmelCase_ = max_num_columns UpperCAmelCase_ = average_logits_per_cell UpperCAmelCase_ = select_one_column UpperCAmelCase_ = allow_empty_column_selection UpperCAmelCase_ = init_cell_selection_weights_to_zero UpperCAmelCase_ = reset_position_index_per_cell UpperCAmelCase_ = disable_per_token_loss # Aggregation hyperparameters UpperCAmelCase_ = aggregation_labels UpperCAmelCase_ = no_aggregation_label_index if isinstance(self.aggregation_labels , _snake_case): 
UpperCAmelCase_ = {int(k): v for k, v in aggregation_labels.items()}
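A brief hedged instantiation sketch; the keyword names come from the __init__ signature above, and the values are illustrative only:

from transformers import TapasConfig

# E.g. a weakly supervised setup with a handful of aggregation operators.
config = TapasConfig(
    num_aggregation_labels=4,
    use_answer_as_supervision=True,
)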
368
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __snake_case ( unittest.TestCase ): def __init__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict=3 , _snake_case : Dict=32 , _snake_case : List[str]=3 , _snake_case : Union[str, Any]=10 , _snake_case : Tuple=[10, 20, 30, 40] , _snake_case : Dict=[1, 1, 2, 1] , _snake_case : List[Any]=True , _snake_case : Dict=True , _snake_case : Union[str, Any]="relu" , _snake_case : Tuple=3 , _snake_case : Union[str, Any]=None , ): """simple docstring""" UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = image_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = embeddings_size UpperCAmelCase_ = hidden_sizes UpperCAmelCase_ = depths UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = hidden_act UpperCAmelCase_ = num_labels UpperCAmelCase_ = scope UpperCAmelCase_ = len(_snake_case) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) UpperCAmelCase_ = self.get_config() return config, pixel_values def lowerCamelCase ( self : List[Any]): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any] , _snake_case : Optional[int]): """simple docstring""" UpperCAmelCase_ = FlaxRegNetModel(config=_snake_case) UpperCAmelCase_ = model(_snake_case) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : Optional[int]): """simple docstring""" UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = FlaxRegNetForImageClassification(config=_snake_case) UpperCAmelCase_ = model(_snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs UpperCAmelCase_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () UpperCAmelCase__ : Tuple = False UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : int = False def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = FlaxRegNetModelTester(self) UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case) def lowerCamelCase ( self : List[Any]): """simple docstring""" 
self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self : List[str]): """simple docstring""" return def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case) @unittest.skip(reason='''RegNet does not use inputs_embeds''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" pass @unittest.skip(reason='''RegNet does not support input and output embeddings''') def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" pass def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(_snake_case) UpperCAmelCase_ = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _snake_case) def lowerCamelCase ( self : Optional[int]): """simple docstring""" def check_hidden_states_output(_snake_case : List[str] , _snake_case : Dict , _snake_case : List[str]): UpperCAmelCase_ = model_class(_snake_case) UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case)) UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ = self.model_tester.num_stages self.assertEqual(len(_snake_case) , expected_num_stages + 1) UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): UpperCAmelCase_ = self._prepare_for_class(_snake_case , _snake_case) UpperCAmelCase_ = model_class(_snake_case) @jax.jit def model_jitted(_snake_case : str , **_snake_case : Union[str, Any]): return model(pixel_values=_snake_case , **_snake_case) with self.subTest('''JIT Enabled'''): UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple() with self.subTest('''JIT Disabled'''): with jax.disable_jit(): UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple() self.assertEqual(len(_snake_case) , len(_snake_case)) for jitted_output, output in zip(_snake_case , _snake_case): self.assertEqual(jitted_output.shape , output.shape) def A () -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ = 
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_flax class __snake_case ( unittest.TestCase ): @cached_property def lowerCamelCase ( self : Dict): """simple docstring""" return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''') if is_vision_available() else None @slow def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''') UpperCAmelCase_ = self.default_image_processor UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = image_processor(images=_snake_case , return_tensors='''np''') UpperCAmelCase_ = model(**_snake_case) # verify the logits UpperCAmelCase_ = (1, 1000) self.assertEqual(outputs.logits.shape , _snake_case) UpperCAmelCase_ = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6]) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4))
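The JIT test above checks that jitted and eager calls agree; the same pattern in miniature, detached from the model classes (a plain function instead of a Flax module):

import jax
import jax.numpy as jnp

def forward(x):
    return jnp.tanh(x) * 2.0

jitted = jax.jit(forward)
x = jnp.ones((2, 3))
with jax.disable_jit():
    eager_out = forward(x)
jit_out = jitted(x)
assert jit_out.shape == eager_out.shape
assert jnp.allclose(jit_out, eager_out, atol=1e-6)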
7
0
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __snake_case ( a ): UpperCAmelCase__ : Optional[int] = (DPMSolverSinglestepScheduler,) UpperCAmelCase__ : str = (('''num_inference_steps''', 2_5),) def lowerCamelCase ( self : Dict , **_snake_case : Dict): """simple docstring""" UpperCAmelCase_ = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf'''), '''variance_type''': None, } config.update(**_snake_case) return config def lowerCamelCase ( self : Dict , _snake_case : int=0 , **_snake_case : List[Any]): """simple docstring""" UpperCAmelCase_ = dict(self.forward_default_kwargs) UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case) UpperCAmelCase_ = self.dummy_sample UpperCAmelCase_ = 0.1 * sample UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ = self.get_scheduler_config(**_snake_case) UpperCAmelCase_ = scheduler_class(**_snake_case) scheduler.set_timesteps(_snake_case) # copy over dummy past residuals UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_snake_case) UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case) new_scheduler.set_timesteps(_snake_case) # copy over dummy past residuals UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase_ , UpperCAmelCase_ = sample, sample for t in range(_snake_case , time_step + scheduler.config.solver_order + 1): UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample UpperCAmelCase_ = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase ( self : Tuple): """simple docstring""" pass def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any]=0 , **_snake_case : int): """simple docstring""" UpperCAmelCase_ = dict(self.forward_default_kwargs) UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case) UpperCAmelCase_ = self.dummy_sample UpperCAmelCase_ = 0.1 * sample UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ = self.get_scheduler_config() UpperCAmelCase_ = scheduler_class(**_snake_case) scheduler.set_timesteps(_snake_case) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_snake_case) UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case) # copy over dummy past residuals new_scheduler.set_timesteps(_snake_case) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample UpperCAmelCase_ = 
new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase ( self : Dict , _snake_case : int=None , **_snake_case : Optional[Any]): """simple docstring""" if scheduler is None: UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(**_snake_case) UpperCAmelCase_ = scheduler_class(**_snake_case) UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(**_snake_case) UpperCAmelCase_ = scheduler_class(**_snake_case) UpperCAmelCase_ = 10 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter scheduler.set_timesteps(_snake_case) for i, t in enumerate(scheduler.timesteps): UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample return sample def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) UpperCAmelCase_ = 50 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter scheduler.set_timesteps(_snake_case) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:]): UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_5_7_4) < 1e-3 def lowerCamelCase ( self : int): """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=_snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) UpperCAmelCase_ = self.full_loop(scheduler=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3 UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config) UpperCAmelCase_ = self.full_loop(scheduler=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3 def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" self.check_over_configs(thresholding=_snake_case) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , algorithm_type='''dpmsolver++''' , solver_order=_snake_case , solver_type=_snake_case , ) def lowerCamelCase ( self : Dict): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_snake_case) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , ) UpperCAmelCase_ = self.full_loop( solver_order=_snake_case , 
solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , ) assert not torch.isnan(_snake_case).any(), "Samples have nan numbers" def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" self.check_over_configs(lower_order_final=_snake_case) self.check_over_configs(lower_order_final=_snake_case) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" self.check_over_configs(lambda_min_clipped=-float('''inf''')) self.check_over_configs(lambda_min_clipped=-5.1) def lowerCamelCase ( self : int): """simple docstring""" self.check_over_configs(variance_type=_snake_case) self.check_over_configs(variance_type='''learned_range''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=_snake_case , time_step=0) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = self.full_loop() UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3 def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_2_4_8) < 1e-3 def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''') UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.1_4_5_3) < 1e-3 def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.0_6_4_9) < 1e-3 def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(thresholding=_snake_case , dynamic_thresholding_ratio=0) UpperCAmelCase_ = scheduler_class(**_snake_case) UpperCAmelCase_ = 10 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter.half() scheduler.set_timesteps(_snake_case) for i, t in enumerate(scheduler.timesteps): UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample assert sample.dtype == torch.floataa
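A compact sketch of the config round-trip these tests exercise repeatedly (save_config and from_pretrained are the diffusers entry points used above; the kwargs come from the test's scheduler config):

import tempfile
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(solver_order=2, algorithm_type="dpmsolver++")
with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)
    restored = DPMSolverSinglestepScheduler.from_pretrained(tmpdir)
assert restored.config.solver_order == 2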
369
import comet # From: unbabel-comet import torch import datasets snake_case_ : Tuple = datasets.logging.get_logger(__name__) snake_case_ : str = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n" snake_case_ : Tuple = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n" snake_case_ : Optional[int] = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. 
Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): def lowerCamelCase ( self : Any): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''sources''': datasets.Value('''string''' , id='''sequence'''), '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Value('''string''' , id='''sequence'''), }) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[ '''https://github.com/Unbabel/COMET''', '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''', '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''', ] , ) def lowerCamelCase ( self : List[Any] , _snake_case : Optional[int]): """simple docstring""" if self.config_name == "default": UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''')) else: UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name)) def lowerCamelCase ( self : List[Any] , _snake_case : str , _snake_case : List[str] , _snake_case : Tuple , _snake_case : int=None , _snake_case : Optional[Any]=False): """simple docstring""" if gpus is None: UpperCAmelCase_ = 1 if torch.cuda.is_available() else 0 UpperCAmelCase_ = {'''src''': sources, '''mt''': predictions, '''ref''': references} UpperCAmelCase_ = [dict(zip(_snake_case , _snake_case)) for t in zip(*data.values())] UpperCAmelCase_ , UpperCAmelCase_ = self.scorer.predict(_snake_case , gpus=_snake_case , progress_bar=_snake_case) return {"mean_score": mean_score, "scores": scores}
7
0
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class __snake_case ( a ): def __init__( self : List[Any] , _snake_case : NestedDataStructureLike[PathLike] , _snake_case : Optional[NamedSplit] = None , _snake_case : Optional[Features] = None , _snake_case : str = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : Optional[str] = None , _snake_case : Optional[int] = None , **_snake_case : Union[str, Any] , ): """simple docstring""" super().__init__( _snake_case , split=_snake_case , features=_snake_case , cache_dir=_snake_case , keep_in_memory=_snake_case , streaming=_snake_case , num_proc=_snake_case , **_snake_case , ) UpperCAmelCase_ = field UpperCAmelCase_ = path_or_paths if isinstance(_snake_case , _snake_case) else {self.split: path_or_paths} UpperCAmelCase_ = Json( cache_dir=_snake_case , data_files=_snake_case , features=_snake_case , field=_snake_case , **_snake_case , ) def lowerCamelCase ( self : List[str]): """simple docstring""" if self.streaming: UpperCAmelCase_ = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None self.builder.download_and_prepare( download_config=_snake_case , download_mode=_snake_case , verification_mode=_snake_case , base_path=_snake_case , num_proc=self.num_proc , ) UpperCAmelCase_ = self.builder.as_dataset( split=self.split , verification_mode=_snake_case , in_memory=self.keep_in_memory) return dataset class __snake_case : def __init__( self : Dict , _snake_case : Dataset , _snake_case : Union[PathLike, BinaryIO] , _snake_case : Optional[int] = None , _snake_case : Optional[int] = None , **_snake_case : List[Any] , ): """simple docstring""" if num_proc is not None and num_proc <= 0: raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""") UpperCAmelCase_ = dataset UpperCAmelCase_ = path_or_buf UpperCAmelCase_ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE UpperCAmelCase_ = num_proc UpperCAmelCase_ = '''utf-8''' UpperCAmelCase_ = to_json_kwargs def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = self.to_json_kwargs.pop('''path_or_buf''' , _snake_case) UpperCAmelCase_ = self.to_json_kwargs.pop('''orient''' , '''records''') UpperCAmelCase_ = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False) UpperCAmelCase_ = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True) UpperCAmelCase_ = self.to_json_kwargs.pop('''compression''' , _snake_case) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""") if isinstance(self.path_or_buf , (str, bytes, os.PathLike)): with fsspec.open(self.path_or_buf , '''wb''' , compression=_snake_case) as buffer: UpperCAmelCase_ = self._write(file_obj=_snake_case , orient=_snake_case , lines=_snake_case , index=_snake_case , **self.to_json_kwargs) else: if compression: raise NotImplementedError( F"""The compression parameter is not supported when writing to a buffer, but compression={compression}""" ''' was passed. 
Please provide a local path instead.''') UpperCAmelCase_ = self._write( file_obj=self.path_or_buf , orient=_snake_case , lines=_snake_case , index=_snake_case , **self.to_json_kwargs) return written def lowerCamelCase ( self : Any , _snake_case : List[str]): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = args UpperCAmelCase_ = query_table( table=self.dataset.data , key=slice(_snake_case , offset + self.batch_size) , indices=self.dataset._indices , ) UpperCAmelCase_ = batch.to_pandas().to_json( path_or_buf=_snake_case , orient=_snake_case , lines=_snake_case , index=_snake_case , **_snake_case) if not json_str.endswith('''\n'''): json_str += "\n" return json_str.encode(self.encoding) def lowerCamelCase ( self : Union[str, Any] , _snake_case : BinaryIO , _snake_case : Any , _snake_case : str , _snake_case : Any , **_snake_case : str , ): """simple docstring""" UpperCAmelCase_ = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset) , self.batch_size) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): UpperCAmelCase_ = self._batch_json((offset, orient, lines, index, to_json_kwargs)) written += file_obj.write(_snake_case) else: UpperCAmelCase_ , UpperCAmelCase_ = len(self.dataset), self.batch_size with multiprocessing.Pool(self.num_proc) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _snake_case , _snake_case)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): written += file_obj.write(_snake_case) return written
370
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __snake_case ( a ): UpperCAmelCase__ : Optional[int] = (DPMSolverSinglestepScheduler,) UpperCAmelCase__ : str = (('''num_inference_steps''', 2_5),) def lowerCamelCase ( self : Dict , **_snake_case : Dict): """simple docstring""" UpperCAmelCase_ = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf'''), '''variance_type''': None, } config.update(**_snake_case) return config def lowerCamelCase ( self : Dict , _snake_case : int=0 , **_snake_case : List[Any]): """simple docstring""" UpperCAmelCase_ = dict(self.forward_default_kwargs) UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case) UpperCAmelCase_ = self.dummy_sample UpperCAmelCase_ = 0.1 * sample UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ = self.get_scheduler_config(**_snake_case) UpperCAmelCase_ = scheduler_class(**_snake_case) scheduler.set_timesteps(_snake_case) # copy over dummy past residuals UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_snake_case) UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case) new_scheduler.set_timesteps(_snake_case) # copy over dummy past residuals UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase_ , UpperCAmelCase_ = sample, sample for t in range(_snake_case , time_step + scheduler.config.solver_order + 1): UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample UpperCAmelCase_ = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase ( self : Tuple): """simple docstring""" pass def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any]=0 , **_snake_case : int): """simple docstring""" UpperCAmelCase_ = dict(self.forward_default_kwargs) UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case) UpperCAmelCase_ = self.dummy_sample UpperCAmelCase_ = 0.1 * sample UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ = self.get_scheduler_config() UpperCAmelCase_ = scheduler_class(**_snake_case) scheduler.set_timesteps(_snake_case) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_snake_case) UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case) # copy over dummy past residuals new_scheduler.set_timesteps(_snake_case) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample UpperCAmelCase_ = 
new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase ( self : Dict , _snake_case : int=None , **_snake_case : Optional[Any]): """simple docstring""" if scheduler is None: UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(**_snake_case) UpperCAmelCase_ = scheduler_class(**_snake_case) UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(**_snake_case) UpperCAmelCase_ = scheduler_class(**_snake_case) UpperCAmelCase_ = 10 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter scheduler.set_timesteps(_snake_case) for i, t in enumerate(scheduler.timesteps): UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample return sample def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) UpperCAmelCase_ = 50 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter scheduler.set_timesteps(_snake_case) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:]): UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_5_7_4) < 1e-3 def lowerCamelCase ( self : int): """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=_snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) UpperCAmelCase_ = self.full_loop(scheduler=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3 UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config) UpperCAmelCase_ = self.full_loop(scheduler=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3 def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" self.check_over_configs(thresholding=_snake_case) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , algorithm_type='''dpmsolver++''' , solver_order=_snake_case , solver_type=_snake_case , ) def lowerCamelCase ( self : Dict): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_snake_case) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , ) UpperCAmelCase_ = self.full_loop( solver_order=_snake_case , 
solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , ) assert not torch.isnan(_snake_case).any(), "Samples have nan numbers" def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" self.check_over_configs(lower_order_final=_snake_case) self.check_over_configs(lower_order_final=_snake_case) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" self.check_over_configs(lambda_min_clipped=-float('''inf''')) self.check_over_configs(lambda_min_clipped=-5.1) def lowerCamelCase ( self : int): """simple docstring""" self.check_over_configs(variance_type=_snake_case) self.check_over_configs(variance_type='''learned_range''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=_snake_case , time_step=0) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = self.full_loop() UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3 def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_2_4_8) < 1e-3 def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''') UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.1_4_5_3) < 1e-3 def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.0_6_4_9) < 1e-3 def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(thresholding=_snake_case , dynamic_thresholding_ratio=0) UpperCAmelCase_ = scheduler_class(**_snake_case) UpperCAmelCase_ = 10 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter.half() scheduler.set_timesteps(_snake_case) for i, t in enumerate(scheduler.timesteps): UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample assert sample.dtype == torch.floataa
7
0
import warnings warnings.warn( "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: " "`from accelerate import find_executable_batch_size` to avoid this warning.", FutureWarning, )
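# Editor's note (hedged sketch, not part of the original shim): the warning above points at
# the replacement API. Assuming a recent `accelerate` release, the recommended import and a
# minimal usage look like this; the `train` function below is hypothetical.
from accelerate import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):  # hypothetical training entry point; name and body are assumptions
    ...  # accelerate retries with a smaller batch_size whenever CUDA runs out of memory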
371
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) snake_case_ : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Tuple = ["DeiTFeatureExtractor"] snake_case_ : List[str] = ["DeiTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : List[Any] = [ "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "DeiTForImageClassification", "DeiTForImageClassificationWithTeacher", "DeiTForMaskedImageModeling", "DeiTModel", "DeiTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Dict = [ "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher", "TFDeiTForMaskedImageModeling", "TFDeiTModel", "TFDeiTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys snake_case_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
7
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case_ : str = logging.get_logger(__name__) snake_case_ : List[str] = { "google/pix2struct-textcaps-base": ( "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json" ), } class __snake_case ( a ): UpperCAmelCase__ : str = '''pix2struct_text_model''' UpperCAmelCase__ : Optional[Any] = ['''past_key_values'''] UpperCAmelCase__ : List[str] = { '''hidden_size''': '''hidden_size''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : List[Any] , _snake_case : Optional[Any]=50244 , _snake_case : List[str]=768 , _snake_case : Tuple=64 , _snake_case : Tuple=2048 , _snake_case : Any=12 , _snake_case : int=12 , _snake_case : Dict=32 , _snake_case : Union[str, Any]=128 , _snake_case : Optional[int]=0.1 , _snake_case : str=1e-6 , _snake_case : int=1.0 , _snake_case : List[Any]="gelu_new" , _snake_case : Optional[int]=0 , _snake_case : Union[str, Any]=False , _snake_case : int=0 , _snake_case : Optional[int]=1 , _snake_case : int=False , _snake_case : Any=True , **_snake_case : str , ): """simple docstring""" UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = d_kv UpperCAmelCase_ = d_ff UpperCAmelCase_ = num_layers UpperCAmelCase_ = num_heads UpperCAmelCase_ = relative_attention_num_buckets UpperCAmelCase_ = relative_attention_max_distance UpperCAmelCase_ = dropout_rate UpperCAmelCase_ = layer_norm_epsilon UpperCAmelCase_ = initializer_factor UpperCAmelCase_ = use_cache UpperCAmelCase_ = eos_token_id UpperCAmelCase_ = decoder_start_token_id # for backwards compatibility UpperCAmelCase_ = dense_act_fn super().__init__( pad_token_id=_snake_case , eos_token_id=_snake_case , decoder_start_token_id=_snake_case , tie_word_embeddings=_snake_case , is_decoder=_snake_case , **_snake_case , ) @classmethod def lowerCamelCase ( cls : Optional[Any] , _snake_case : Union[str, os.PathLike] , **_snake_case : Union[str, Any]): """simple docstring""" cls._set_token_in_kwargs(_snake_case) UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_snake_case , **_snake_case) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''') == "pix2struct": UpperCAmelCase_ = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}.
This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(_snake_case , **_snake_case) class __snake_case ( a ): UpperCAmelCase__ : str = '''pix2struct_vision_model''' def __init__( self : Any , _snake_case : List[str]=768 , _snake_case : str=768 , _snake_case : List[Any]=2048 , _snake_case : Optional[Any]=64 , _snake_case : Union[str, Any]=12 , _snake_case : Optional[int]=12 , _snake_case : List[str]="gelu_new" , _snake_case : Any=1e-6 , _snake_case : Dict=0.0 , _snake_case : Dict=0.0 , _snake_case : Optional[Any]=1e-10 , _snake_case : List[Any]=1.0 , _snake_case : int=4096 , _snake_case : List[str]=32 , _snake_case : List[str]=128 , **_snake_case : List[str] , ): """simple docstring""" super().__init__(**_snake_case) UpperCAmelCase_ = hidden_size UpperCAmelCase_ = patch_embed_hidden_size UpperCAmelCase_ = d_ff UpperCAmelCase_ = dropout_rate UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = initializer_range UpperCAmelCase_ = initializer_factor UpperCAmelCase_ = attention_dropout UpperCAmelCase_ = layer_norm_eps UpperCAmelCase_ = dense_act_fn UpperCAmelCase_ = seq_len UpperCAmelCase_ = relative_attention_num_buckets UpperCAmelCase_ = relative_attention_max_distance UpperCAmelCase_ = d_kv @classmethod def lowerCamelCase ( cls : Tuple , _snake_case : Union[str, os.PathLike] , **_snake_case : str): """simple docstring""" cls._set_token_in_kwargs(_snake_case) UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_snake_case , **_snake_case) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''') == "pix2struct": UpperCAmelCase_ = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(_snake_case , **_snake_case) class __snake_case ( a ): UpperCAmelCase__ : Optional[Any] = '''pix2struct''' UpperCAmelCase__ : int = True def __init__( self : Any , _snake_case : Any=None , _snake_case : Union[str, Any]=None , _snake_case : Union[str, Any]=1.0 , _snake_case : List[str]=0.0_2 , _snake_case : str=False , _snake_case : str=False , _snake_case : str=True , **_snake_case : str , ): """simple docstring""" super().__init__(tie_word_embeddings=_snake_case , is_encoder_decoder=_snake_case , **_snake_case) if text_config is None: UpperCAmelCase_ = {} logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''') if vision_config is None: UpperCAmelCase_ = {} logger.info('''vision_config is None.
Initializing the Pix2StructVisionConfig with default values.''') UpperCAmelCase_ = PixaStructTextConfig(**_snake_case) UpperCAmelCase_ = PixaStructVisionConfig(**_snake_case) UpperCAmelCase_ = self.text_config.decoder_start_token_id UpperCAmelCase_ = self.text_config.pad_token_id UpperCAmelCase_ = self.text_config.eos_token_id UpperCAmelCase_ = initializer_factor UpperCAmelCase_ = initializer_range UpperCAmelCase_ = self.initializer_range UpperCAmelCase_ = self.initializer_range UpperCAmelCase_ = is_vqa @classmethod def lowerCamelCase ( cls : List[Any] , _snake_case : PixaStructTextConfig , _snake_case : PixaStructVisionConfig , **_snake_case : Optional[int]): """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_snake_case) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = copy.deepcopy(self.__dict__) UpperCAmelCase_ = self.text_config.to_dict() UpperCAmelCase_ = self.vision_config.to_dict() UpperCAmelCase_ = self.__class__.model_type return output
350
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets snake_case_ : Dict = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n" snake_case_ : List[str] = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n" snake_case_ : List[Any] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ...
[\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... 
case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): def lowerCamelCase ( self : Tuple): """simple docstring""" if version.parse(scb.__version__) < version.parse('''1.4.12'''): raise ImportWarning( '''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n''' '''You can install it with `pip install "sacrebleu>=1.4.12"`.''') return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''') , id='''references'''), }) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[ '''https://github.com/jhclark/tercom''', ] , ) def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , ): """simple docstring""" UpperCAmelCase_ = len(references[0]) if any(len(_snake_case) != references_per_prediction for refs in references): raise ValueError('''Sacrebleu requires the same number of references for each prediction''') UpperCAmelCase_ = [[refs[i] for refs in references] for i in range(_snake_case)] UpperCAmelCase_ = TER( normalized=_snake_case , no_punct=_snake_case , asian_support=_snake_case , case_sensitive=_snake_case , ) UpperCAmelCase_ = sb_ter.corpus_score(_snake_case , _snake_case) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
7
0
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __snake_case : @staticmethod def lowerCamelCase ( *_snake_case : List[str] , **_snake_case : str): """simple docstring""" pass @is_pipeline_test @require_torch @require_vision class __snake_case ( unittest.TestCase ): UpperCAmelCase__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def lowerCamelCase ( self : Any , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''') UpperCAmelCase_ = [ { '''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''), '''question''': '''How many cats are there?''', }, { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''question''': '''How many cats are there?''', }, ] return vqa_pipeline, examples def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , _snake_case : Optional[int]): """simple docstring""" UpperCAmelCase_ = vqa_pipeline(_snake_case , top_k=1) self.assertEqual( _snake_case , [ [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}], [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}], ] , ) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''') UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' UpperCAmelCase_ = '''How many cats are there?''' UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2) self.assertEqual( _snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}]) UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2) self.assertEqual( _snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}]) @slow @require_torch def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''') UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' UpperCAmelCase_ = '''How many cats are there?''' UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2) self.assertEqual( nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]) UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2) self.assertEqual( nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]) UpperCAmelCase_ = vqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2) self.assertEqual( nested_simplify(_snake_case , decimals=4) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , ) @require_tf
@unittest.skip('''Visual question answering not implemented in TF''') def lowerCamelCase ( self : Tuple): """simple docstring""" pass
351
import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class __snake_case ( unittest.TestCase , a ): def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = load_tool('''text-to-speech''') self.tool.setup() def lowerCamelCase ( self : int): """simple docstring""" torch.manual_seed(0) UpperCAmelCase_ = self.tool('''hey''') UpperCAmelCase_ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , )) def lowerCamelCase ( self : Any): """simple docstring""" torch.manual_seed(0) UpperCAmelCase_ = self.tool('''hey''') UpperCAmelCase_ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
7
0
import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __snake_case ( a ): UpperCAmelCase__ : int = (KDPMaDiscreteScheduler,) UpperCAmelCase__ : List[str] = 1_0 def lowerCamelCase ( self : str , **_snake_case : Dict): """simple docstring""" UpperCAmelCase_ = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', } config.update(**_snake_case) return config def lowerCamelCase ( self : List[Any]): """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_snake_case) def lowerCamelCase ( self : Optional[int]): """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]): self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_snake_case) def lowerCamelCase ( self : List[str]): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_snake_case) def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''') UpperCAmelCase_ = scheduler_class(**_snake_case) scheduler.set_timesteps(self.num_inference_steps) UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCAmelCase_ = sample.to(_snake_case) for i, t in enumerate(scheduler.timesteps): UpperCAmelCase_ = scheduler.scale_model_input(_snake_case , _snake_case) UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case) UpperCAmelCase_ = output.prev_sample UpperCAmelCase_ = torch.sum(torch.abs(_snake_case)) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.69_34e-07) < 1e-2 assert abs(result_mean.item() - 6.11_12e-10) < 1e-3 else: # CUDA assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72e-07) < 1e-2 assert abs(result_mean.item() - 0.0_0_0_2) < 1e-3 def lowerCamelCase ( self : Optional[Any]): """simple docstring""" if torch_device == "mps": return UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config() UpperCAmelCase_ = scheduler_class(**_snake_case) scheduler.set_timesteps(self.num_inference_steps) UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCAmelCase_ = sample.to(_snake_case) for i, t in enumerate(scheduler.timesteps): UpperCAmelCase_ = scheduler.scale_model_input(_snake_case , _snake_case) UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case) UpperCAmelCase_ = output.prev_sample UpperCAmelCase_ = torch.sum(torch.abs(_snake_case)) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4125) < 1e-2 assert abs(result_mean.item() - 0.0_2_6_6) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4125) < 1e-2 assert abs(result_mean.item() - 0.0_2_6_6) < 1e-3 def lowerCamelCase ( self : Any): """simple docstring""" if torch_device == "mps": return UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ =
self.get_scheduler_config() UpperCAmelCase_ = scheduler_class(**_snake_case) scheduler.set_timesteps(self.num_inference_steps , device=_snake_case) UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter.to(_snake_case) * scheduler.init_noise_sigma for t in scheduler.timesteps: UpperCAmelCase_ = scheduler.scale_model_input(_snake_case , _snake_case) UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case) UpperCAmelCase_ = output.prev_sample UpperCAmelCase_ = torch.sum(torch.abs(_snake_case)) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) if str(_snake_case).startswith('''cpu'''): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4125) < 1e-2 assert abs(result_mean.item() - 0.0_2_6_6) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4125) < 1e-2 assert abs(result_mean.item() - 0.0_2_6_6) < 1e-3
352
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) else: from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel, UTransformeraDModel from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
7
0
"""simple docstring""" import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("1.0.0a"): raise Exception("requires fairseq >= 1.0.0a") logging.set_verbosity_info() snake_case_ : Dict = logging.get_logger(__name__) snake_case_ : List[str] = "Hello world! cécé herlolip" def A (__A : str , __A : str , __A : bool ) -> str: """simple docstring""" UpperCAmelCase_ = FairseqRobertaModel.from_pretrained(__A ) roberta.eval() # disable dropout UpperCAmelCase_ = roberta.model.encoder.sentence_encoder UpperCAmelCase_ = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , ) if classification_head: UpperCAmelCase_ = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our RoBERTa config:''' , __A ) UpperCAmelCase_ = XLMRobertaXLForSequenceClassification(__A ) if classification_head else XLMRobertaXLForMaskedLM(__A ) model.eval() # Now let's copy all the weights. # Embeddings UpperCAmelCase_ = roberta_sent_encoder.embed_tokens.weight UpperCAmelCase_ = roberta_sent_encoder.embed_positions.weight UpperCAmelCase_ = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
UpperCAmelCase_ = roberta_sent_encoder.layer_norm.weight UpperCAmelCase_ = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer UpperCAmelCase_ = model.roberta.encoder.layer[i] UpperCAmelCase_ = roberta_sent_encoder.layers[i] UpperCAmelCase_ = layer.attention UpperCAmelCase_ = roberta_layer.self_attn_layer_norm.weight UpperCAmelCase_ = roberta_layer.self_attn_layer_norm.bias # self attention UpperCAmelCase_ = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) UpperCAmelCase_ = roberta_layer.self_attn.q_proj.weight UpperCAmelCase_ = roberta_layer.self_attn.q_proj.bias UpperCAmelCase_ = roberta_layer.self_attn.k_proj.weight UpperCAmelCase_ = roberta_layer.self_attn.k_proj.bias UpperCAmelCase_ = roberta_layer.self_attn.v_proj.weight UpperCAmelCase_ = roberta_layer.self_attn.v_proj.bias # self-attention output UpperCAmelCase_ = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape UpperCAmelCase_ = roberta_layer.self_attn.out_proj.weight UpperCAmelCase_ = roberta_layer.self_attn.out_proj.bias # this one is final layer norm UpperCAmelCase_ = roberta_layer.final_layer_norm.weight UpperCAmelCase_ = roberta_layer.final_layer_norm.bias # intermediate (fairseq's first feed-forward projection is fc1) UpperCAmelCase_ = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape UpperCAmelCase_ = roberta_layer.fc1.weight UpperCAmelCase_ = roberta_layer.fc1.bias # output (fairseq's second feed-forward projection is fc2) UpperCAmelCase_ = layer.output assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape UpperCAmelCase_ = roberta_layer.fc2.weight UpperCAmelCase_ = roberta_layer.fc2.bias # end of layer if classification_head: UpperCAmelCase_ = roberta.model.classification_heads['''mnli'''].dense.weight UpperCAmelCase_ = roberta.model.classification_heads['''mnli'''].dense.bias UpperCAmelCase_ = roberta.model.classification_heads['''mnli'''].out_proj.weight UpperCAmelCase_ = roberta.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head UpperCAmelCase_ = roberta.model.encoder.lm_head.dense.weight UpperCAmelCase_ = roberta.model.encoder.lm_head.dense.bias UpperCAmelCase_ = roberta.model.encoder.lm_head.layer_norm.weight UpperCAmelCase_ = roberta.model.encoder.lm_head.layer_norm.bias UpperCAmelCase_ = roberta.model.encoder.lm_head.weight UpperCAmelCase_ = roberta.model.encoder.lm_head.bias # Let's check that we get the same results.
UpperCAmelCase_ = roberta.encode(__A ).unsqueeze(0 ) # batch of size 1 UpperCAmelCase_ = model(__A )[0] if classification_head: UpperCAmelCase_ = roberta.model.classification_heads['''mnli'''](roberta.extract_features(__A ) ) else: UpperCAmelCase_ = roberta.model(__A )[0] print(our_output.shape , their_output.shape ) UpperCAmelCase_ = torch.max(torch.abs(our_output - their_output ) ).item() print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7 UpperCAmelCase_ = torch.allclose(__A , __A , atol=1E-3 ) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) pathlib.Path(__A ).mkdir(parents=__A , exist_ok=__A ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__A ) if __name__ == "__main__": snake_case_ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) snake_case_ : str = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
353
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __snake_case : @staticmethod def lowerCamelCase ( *_snake_case : List[str] , **_snake_case : str): """simple docstring""" pass @is_pipeline_test @require_torch @require_vision class __snake_case ( unittest.TestCase ): UpperCAmelCase__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def lowerCamelCase ( self : Any , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''') UpperCAmelCase_ = [ { '''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''), '''question''': '''How many cats are there?''', }, { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''question''': '''How many cats are there?''', }, ] return vqa_pipeline, examples def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , _snake_case : Optional[int]): """simple docstring""" UpperCAmelCase_ = vqa_pipeline(_snake_case , top_k=1) self.assertEqual( _snake_case , [ [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}], [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}], ] , ) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''') UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' UpperCAmelCase_ = '''How many cats are there?''' UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2) self.assertEqual( _snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}]) UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2) self.assertEqual( _snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}]) @slow @require_torch def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''') UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' UpperCAmelCase_ = '''How many cats are there?''' UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2) self.assertEqual( nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]) UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2) self.assertEqual( nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]) UpperCAmelCase_ = vqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2) self.assertEqual( nested_simplify(_snake_case , decimals=4) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , ) @require_tf
@unittest.skip('''Visual question answering not implemented in TF''') def lowerCamelCase ( self : Tuple): """simple docstring""" pass
7
0
def solution(n: int = 600851475143 ) -> int: """simple docstring""" try: n = int(n ) except (TypeError, ValueError): raise TypeError('''Parameter n must be int or castable to int.''' ) if n <= 0: raise ValueError('''Parameter n must be greater than or equal to one.''' ) i = 2 ans = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 ans = i while n % i == 0: n = n // i i += 1 return int(ans ) if __name__ == "__main__": print(f"{solution() = }")
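# Editor's note (hedged check, added by the editor): `solution` strips factors by trial
# division, so the largest prime factor of n is what survives; since 13195 = 5 * 7 * 13 * 29:
assert solution(13195) == 29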
354
from timeit import timeit def get_set_bits_count_using_brian_kernighans_algorithm(number: int ) -> int: """simple docstring""" if number < 0: raise ValueError('''the value of input must not be negative''' ) result = 0 while number: number &= number - 1 result += 1 return result def get_set_bits_count_using_modulo_operator(number: int ) -> int: """simple docstring""" if number < 0: raise ValueError('''the value of input must not be negative''' ) result = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def benchmark() -> None: """simple docstring""" def do_benchmark(number: int ) -> None: setup = '''import __main__ as z''' print(F"""Benchmark when {number = }:""" ) print(F"""{get_set_bits_count_using_modulo_operator(number ) = }""" ) timing = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=setup ) print(F"""timeit() runs in {timing} seconds""" ) print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }""" ) timing = timeit( '''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=setup , ) print(F"""timeit() runs in {timing} seconds""" ) for number in (25, 37, 58, 0): do_benchmark(number ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
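# Editor's note (hedged check, added by the editor): `number &= number - 1` clears the
# lowest set bit, so Brian Kernighan's loop runs once per set bit rather than once per bit
# position; 0b1011 has three set bits, and both counters should agree:
assert get_set_bits_count_using_brian_kernighans_algorithm(0b1011) == 3
assert get_set_bits_count_using_modulo_operator(0b1011) == 3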
7
0
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class __snake_case ( unittest.TestCase ): UpperCAmelCase__ : List[str] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase__ : List[str] = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def lowerCamelCase ( self : Dict , _snake_case : Any , _snake_case : Any , _snake_case : Tuple): """simple docstring""" UpperCAmelCase_ = TextaTextGenerationPipeline(model=_snake_case , tokenizer=_snake_case) return generator, ["Something to write", "Something else"] def lowerCamelCase ( self : Dict , _snake_case : List[Any] , _snake_case : int): """simple docstring""" UpperCAmelCase_ = generator('''Something there''') self.assertEqual(_snake_case , [{'''generated_text''': ANY(_snake_case)}]) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''')) UpperCAmelCase_ = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_snake_case) self.assertEqual( _snake_case , [ [{'''generated_text''': ANY(_snake_case)}, {'''generated_text''': ANY(_snake_case)}], [{'''generated_text''': ANY(_snake_case)}, {'''generated_text''': ANY(_snake_case)}], ] , ) UpperCAmelCase_ = generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_snake_case) self.assertEqual( _snake_case , [ [{'''generated_text''': ANY(_snake_case)}, {'''generated_text''': ANY(_snake_case)}], [{'''generated_text''': ANY(_snake_case)}, {'''generated_text''': ANY(_snake_case)}], ] , ) with self.assertRaises(_snake_case): generator(4) @require_torch def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''') # do_sample=False necessary for reproducibility UpperCAmelCase_ = generator('''Something there''' , do_sample=_snake_case) self.assertEqual(_snake_case , [{'''generated_text''': ''''''}]) UpperCAmelCase_ = 3 UpperCAmelCase_ = generator( '''Something there''' , num_return_sequences=_snake_case , num_beams=_snake_case , ) UpperCAmelCase_ = [ {'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''}, {'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''}, {'''generated_text''': ''''''}, ] self.assertEqual(_snake_case , _snake_case) UpperCAmelCase_ = generator('''This is a test''' , do_sample=_snake_case , num_return_sequences=2 , return_tensors=_snake_case) self.assertEqual( _snake_case , [ {'''generated_token_ids''': ANY(torch.Tensor)}, {'''generated_token_ids''': ANY(torch.Tensor)}, ] , ) UpperCAmelCase_ = generator.model.config.eos_token_id UpperCAmelCase_ = '''<pad>''' UpperCAmelCase_ = generator( ['''This is a test''', '''This is a second test'''] , do_sample=_snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=_snake_case , ) self.assertEqual( _snake_case , [ [ {'''generated_token_ids''': ANY(torch.Tensor)}, {'''generated_token_ids''': ANY(torch.Tensor)}, ], [ {'''generated_token_ids''': ANY(torch.Tensor)}, {'''generated_token_ids''': ANY(torch.Tensor)}, ], ] , ) @require_tf def lowerCamelCase (
self : Any): """simple docstring""" UpperCAmelCase_ = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''') # do_sample=False necessary for reproducibility UpperCAmelCase_ = generator('''Something there''' , do_sample=_snake_case) self.assertEqual(_snake_case , [{'''generated_text''': ''''''}])
355
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = 10 def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = [1, 2, 3, 4] UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.''' UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case) self.assertEqual(_snake_case , []) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''''' UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case) self.assertEqual(_snake_case , []) self.assertEqual(_snake_case , []) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = ( '''It was the year of Our Lord one thousand seven hundred and ''' '''seventy-five\n\nSpiritual revelations were conceded to England ''' '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times''' ) UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case) UpperCAmelCase_ = [ '''It was the year of Our Lord one thousand seven hundred and seventy-five.''', '''Spiritual revelations were conceded to England at that favoured period, as at this.''', ] self.assertEqual(_snake_case , _snake_case) UpperCAmelCase_ = ['''It was the best of times.'''] self.assertEqual(_snake_case , _snake_case) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = torch.tensor([1, 2, 3, 4]) UpperCAmelCase_ = torch.tensor([1, 1, 1, 1]) np.testing.assert_array_equal(build_mask(_snake_case , 0).numpy() , expected.numpy()) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23]) UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0]) np.testing.assert_array_equal(build_mask(_snake_case , 23).numpy() , expected.numpy()) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1]) UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0]) np.testing.assert_array_equal(build_mask(_snake_case , 1).numpy() , expected.numpy()) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = 101 UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]]) UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]]) UpperCAmelCase_ = compute_token_type_ids(_snake_case , _snake_case) np.testing.assert_array_equal(_snake_case , _snake_case)
7
0
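The row above imports a truncate-or-pad helper from utils_summarization and pins its behaviour through assertions. As a reading aid, here is a minimal stand-alone sketch of that behaviour, inferred only from those assertions; the body is an assumption, not the actual utils_summarization implementation.

def truncate_or_pad(sequence, block_size, pad_token_id):
    # Truncate sequences that are at least block_size long...
    if len(sequence) >= block_size:
        return sequence[:block_size]
    # ...otherwise right-pad with pad_token_id up to block_size.
    return sequence + [pad_token_id] * (block_size - len(sequence))

# These mirror the expectations asserted in the tests above.
assert truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad(list(range(1, 14)), 10, 0) == list(range(1, 11))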
from __future__ import annotations

from scipy.special import comb  # type: ignore


class __snake_case :

    def __init__( self : Union[str, Any] , _snake_case : list[tuple[float, float]]):
        """simple docstring"""
        UpperCAmelCase_ = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        UpperCAmelCase_ = len(_snake_case) - 1

    def lowerCamelCase ( self : Optional[Any] , _snake_case : float):
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        UpperCAmelCase_ = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree , _snake_case) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(_snake_case) , 5) == 1
        return output_values

    def lowerCamelCase ( self : str , _snake_case : float):
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        UpperCAmelCase_ = self.basis_function(_snake_case)
        UpperCAmelCase_ = 0.0
        UpperCAmelCase_ = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def lowerCamelCase ( self : Dict , _snake_case : float = 0.0_1):
        """simple docstring"""
        from matplotlib import pyplot as plt  # type: ignore

        UpperCAmelCase_ = []  # x coordinates of points to plot
        UpperCAmelCase_ = []  # y coordinates of points to plot

        UpperCAmelCase_ = 0.0
        while t <= 1:
            UpperCAmelCase_ = self.bezier_curve_function(_snake_case)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        UpperCAmelCase_ = [i[0] for i in self.list_of_points]
        UpperCAmelCase_ = [i[1] for i in self.list_of_points]

        plt.plot(
            _snake_case , _snake_case , color='''blue''' , label='''Curve of Degree ''' + str(self.degree) , )
        plt.scatter(_snake_case , _snake_case , color='''red''' , label='''Control Points''')
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
356
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): snake_case_ : Any = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right snake_case_ : Optional[Any] = 128022 snake_case_ : Optional[int] = 128028 @require_sentencepiece class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : List[str] = MaMaaaTokenizer UpperCAmelCase__ : int = False UpperCAmelCase__ : Dict = False UpperCAmelCase__ : List[str] = True def lowerCamelCase ( self : str): """simple docstring""" super().setUp() UpperCAmelCase_ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case)))) UpperCAmelCase_ = Path(self.tmpdirname) save_json(_snake_case , save_dir / VOCAB_FILES_NAMES['''vocab_file''']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES['''spm_file''']) UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def lowerCamelCase ( self : str , **_snake_case : Union[str, Any]): """simple docstring""" return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_snake_case) def lowerCamelCase ( self : Optional[int] , _snake_case : List[str]): """simple docstring""" return ( "This is a test", "This is a test", ) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = '''</s>''' UpperCAmelCase_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = list(tokenizer.get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''</s>''') self.assertEqual(vocab_keys[1] , '''<unk>''') self.assertEqual(vocab_keys[-1] , '''<s>''') self.assertEqual(len(_snake_case) , tokenizer.vocab_size + len(tokenizer.get_added_vocab())) @unittest.skip('''Skip this test while all models are still to be uploaded.''') def lowerCamelCase ( self : Optional[int]): """simple docstring""" pass def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = tokenizer.tokenize('''This is a test''') self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual( tokenizer.convert_tokens_to_ids(_snake_case) , [2, 3, 4, 5, 6] , ) UpperCAmelCase_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6]) self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case) self.assertEqual(_snake_case , '''This is a test''') @slow def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 
207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_snake_case , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , ) @require_torch @require_sentencepiece @require_tokenizers class __snake_case ( unittest.TestCase ): UpperCAmelCase__ : Dict = '''facebook/m2m100_418M''' UpperCAmelCase__ : Dict = [ '''In my opinion, there are two levels of response from the French government.''', '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''', ] UpperCAmelCase__ : Dict = [ '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', ] # fmt: off UpperCAmelCase__ : Any = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2] @classmethod def lowerCamelCase ( cls : Optional[Any]): """simple docstring""" UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''') UpperCAmelCase_ = 1 return cls def lowerCamelCase ( self : List[Any]): 
"""simple docstring""" self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006) self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022) self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076) self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.tokenizer.get_vocab() self.assertEqual(len(_snake_case) , self.tokenizer.vocab_size) self.assertEqual(vocab['''<unk>'''] , 3) self.assertIn(self.tokenizer.get_lang_token('''en''') , _snake_case) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''en''' UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , _snake_case) def lowerCamelCase ( self : Any): """simple docstring""" self.assertIn(_snake_case , self.tokenizer.all_special_ids) # fmt: off UpperCAmelCase_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: on UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case) UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case) self.assertEqual(_snake_case , _snake_case) self.assertNotIn(self.tokenizer.eos_token , _snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(_snake_case) UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(_snake_case) self.assertDictEqual(new_tok.lang_token_to_id , _snake_case) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = '''en''' UpperCAmelCase_ = '''fr''' UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''') UpperCAmelCase_ = shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id) for k in batch: UpperCAmelCase_ = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''mr''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) UpperCAmelCase_ = '''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) @require_torch def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) UpperCAmelCase_ = '''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')]) 
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''') self.assertEqual( nested_simplify(_snake_case) , { # en_XX, A, test, EOS '''input_ids''': [[128022, 58, 4183, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 128006, } , )
7
0
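The Bézier row in this record evaluates curves through the Bernstein basis. The same evaluation can be written as a short stand-alone function; this is an illustrative sketch with hypothetical names, not part of the dataset.

from scipy.special import comb

def bezier_point(control_points, t):
    # Bernstein-basis evaluation: weight each control point by C(n, i) * (1-t)^(n-i) * t^i.
    n = len(control_points) - 1  # degree of the curve
    x = sum(comb(n, i) * (1 - t) ** (n - i) * t**i * p[0] for i, p in enumerate(control_points))
    y = sum(comb(n, i) * (1 - t) ** (n - i) * t**i * p[1] for i, p in enumerate(control_points))
    return (x, y)

# Midpoint of the degree-1 curve through (1, 2) and (3, 5) is their average.
assert bezier_point([(1, 2), (3, 5)], 0.5) == (2.0, 3.5)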
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging


snake_case_ : Any = logging.get_logger(__name__)

snake_case_ : List[Any] = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class __snake_case ( a ):
    UpperCAmelCase__ : Optional[Any] = '''t5'''
    UpperCAmelCase__ : Optional[int] = ['''past_key_values''']
    UpperCAmelCase__ : List[str] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}

    def __init__( self : Tuple , _snake_case : Optional[Any]=32128 , _snake_case : int=512 , _snake_case : Union[str, Any]=64 , _snake_case : List[str]=2048 , _snake_case : Tuple=6 , _snake_case : List[str]=None , _snake_case : List[Any]=8 , _snake_case : List[Any]=32 , _snake_case : Dict=128 , _snake_case : Tuple=0.1 , _snake_case : str=1e-6 , _snake_case : List[str]=1.0 , _snake_case : List[Any]="relu" , _snake_case : str=True , _snake_case : Optional[Any]=True , _snake_case : str=0 , _snake_case : int=1 , **_snake_case : int , ):
        """simple docstring"""
        UpperCAmelCase_ = vocab_size
        UpperCAmelCase_ = d_model
        UpperCAmelCase_ = d_kv
        UpperCAmelCase_ = d_ff
        UpperCAmelCase_ = num_layers
        UpperCAmelCase_ = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        UpperCAmelCase_ = num_heads
        UpperCAmelCase_ = relative_attention_num_buckets
        UpperCAmelCase_ = relative_attention_max_distance
        UpperCAmelCase_ = dropout_rate
        UpperCAmelCase_ = layer_norm_epsilon
        UpperCAmelCase_ = initializer_factor
        UpperCAmelCase_ = feed_forward_proj
        UpperCAmelCase_ = use_cache

        UpperCAmelCase_ = self.feed_forward_proj.split('''-''')
        UpperCAmelCase_ = act_info[-1]
        UpperCAmelCase_ = act_info[0] == '''gated'''

        if len(_snake_case) > 1 and act_info[0] != "gated" or len(_snake_case) > 2:
            raise ValueError(
                F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. """
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''')

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            UpperCAmelCase_ = '''gelu_new'''

        super().__init__(
            pad_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , **_snake_case , )


class __snake_case ( a ):

    @property
    def lowerCamelCase ( self : str):
        """simple docstring"""
        UpperCAmelCase_ = {
            '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
            '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
        }
        if self.use_past:
            UpperCAmelCase_ = '''past_encoder_sequence + sequence'''
            UpperCAmelCase_ = {0: '''batch'''}
            UpperCAmelCase_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        else:
            UpperCAmelCase_ = {0: '''batch''', 1: '''decoder_sequence'''}
            UpperCAmelCase_ = {0: '''batch''', 1: '''decoder_sequence'''}

        if self.use_past:
            self.fill_with_past_key_values_(_snake_case , direction='''inputs''')

        return common_inputs

    @property
    def lowerCamelCase ( self : Union[str, Any]):
        """simple docstring"""
        return 13
357
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

snake_case_ : List[str] = logging.get_logger(__name__)


@add_end_docstrings(a )
class __snake_case ( a ):

    def __init__( self : Tuple , *_snake_case : List[Any] , **_snake_case : Optional[Any]):
        """simple docstring"""
        super().__init__(*_snake_case , **_snake_case)
        self.check_model_type(_snake_case)

    def lowerCamelCase ( self : List[str] , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : str=None , **_snake_case : Optional[Any]):
        """simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ = {}, {}
        if padding is not None:
            UpperCAmelCase_ = padding
        if truncation is not None:
            UpperCAmelCase_ = truncation
        if top_k is not None:
            UpperCAmelCase_ = top_k
        return preprocess_params, {}, postprocess_params

    def __call__( self : List[Any] , _snake_case : Union["Image.Image", str] , _snake_case : str = None , **_snake_case : str):
        """simple docstring"""
        if isinstance(_snake_case , (Image.Image, str)) and isinstance(_snake_case , _snake_case):
            UpperCAmelCase_ = {'''image''': image, '''question''': question}
        else:
            UpperCAmelCase_ = image
        UpperCAmelCase_ = super().__call__(_snake_case , **_snake_case)
        return results

    def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : Optional[int]=False , _snake_case : int=False):
        """simple docstring"""
        UpperCAmelCase_ = load_image(inputs['''image'''])
        UpperCAmelCase_ = self.tokenizer(
            inputs['''question'''] , return_tensors=self.framework , padding=_snake_case , truncation=_snake_case)
        UpperCAmelCase_ = self.image_processor(images=_snake_case , return_tensors=self.framework)
        model_inputs.update(_snake_case)
        return model_inputs

    def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]):
        """simple docstring"""
        UpperCAmelCase_ = self.model(**_snake_case)
        return model_outputs

    def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : List[str]=5):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            UpperCAmelCase_ = self.model.config.num_labels

        if self.framework == "pt":
            UpperCAmelCase_ = model_outputs.logits.sigmoid()[0]
            UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(_snake_case)
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")

        UpperCAmelCase_ = scores.tolist()
        UpperCAmelCase_ = ids.tolist()
        return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_snake_case , _snake_case)]
7
0
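The T5 configuration in this record validates its feed_forward_proj string by splitting on "-" into an optional "gated" flag plus an activation name. A compact restatement of that convention follows; the function name is hypothetical and the logic is a simplified sketch of the check, not the transformers code itself.

def parse_feed_forward_proj(value: str) -> tuple[str, bool]:
    # "gelu" -> ("gelu", False); "gated-gelu" -> ("gelu", True).
    parts = value.split("-")
    act, is_gated = parts[-1], parts[0] == "gated"
    # Reject anything that is neither `{ACT_FN}` nor `gated-{ACT_FN}`.
    if (len(parts) > 1 and not is_gated) or len(parts) > 2:
        raise ValueError(f"invalid feed_forward_proj: {value!r}")
    return act, is_gated

assert parse_feed_forward_proj("relu") == ("relu", False)
assert parse_feed_forward_proj("gated-gelu") == ("gelu", True)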
def A (__A : int ) -> str:
    """simple docstring"""
    if isinstance(__A , float ):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
    if isinstance(__A , str ):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
    if num == 0:
        return "0b0"
    UpperCAmelCase_ = False
    if num < 0:
        UpperCAmelCase_ = True
        UpperCAmelCase_ = -num
    UpperCAmelCase_ = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(__A ) for e in binary )
    return "0b" + "".join(str(__A ) for e in binary )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
358
import sys


def A (__A : int ) -> Dict:
    """simple docstring"""
    UpperCAmelCase_ = len(__A )
    UpperCAmelCase_ = [[0 for x in range(__A )] for x in range(__A )]
    UpperCAmelCase_ = [[0 for x in range(__A )] for x in range(__A )]
    for chain_length in range(2 , __A ):
        for a in range(1 , n - chain_length + 1 ):
            UpperCAmelCase_ = a + chain_length - 1
            UpperCAmelCase_ = sys.maxsize
            for c in range(__A , __A ):
                UpperCAmelCase_ = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    UpperCAmelCase_ = cost
                    UpperCAmelCase_ = c
    return matrix, sol


def A (__A : Any , __A : Dict , __A : Optional[int] ) -> Optional[int]:
    """simple docstring"""
    if i == j:
        print('''A''' + str(__A ) , end=''' ''' )
    else:
        print('''(''' , end=''' ''' )
        print_optimal_solution(__A , __A , optimal_solution[i][j] )
        print_optimal_solution(__A , optimal_solution[i][j] + 1 , __A )
        print(''')''' , end=''' ''' )


def A () -> List[str]:
    """simple docstring"""
    UpperCAmelCase_ = [30, 35, 15, 5, 10, 20, 25]
    UpperCAmelCase_ = len(__A )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    UpperCAmelCase_ , UpperCAmelCase_ = matrix_chain_order(__A )

    print('''No. of Operations required: ''' + str(matrix[1][n - 1] ) )
    print_optimal_solution(__A , 1 , n - 1 )


if __name__ == "__main__":
    main()
7
0
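The binary-conversion row in this record builds the bit string via repeated modulo and right shift, with a "-0b"/"0b" prefix convention. Here is an equivalent, de-obfuscated sketch for reference; the names are illustrative, not the dataset's.

def int_to_binary(num: int) -> str:
    # Zero is a special case: no bits are produced by the loop below.
    if num == 0:
        return "0b0"
    sign, num = ("-" if num < 0 else "", abs(num))
    bits = []
    while num > 0:
        bits.append(str(num % 2))  # collect least-significant bit first
        num >>= 1
    return sign + "0b" + "".join(reversed(bits))

assert int_to_binary(0) == "0b0"
assert int_to_binary(5) == "0b101"
assert int_to_binary(-5) == "-0b101"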
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __snake_case : def __init__( self : Any , _snake_case : Dict , _snake_case : Union[str, Any]=13 , _snake_case : Optional[int]=32 , _snake_case : str=3 , _snake_case : Any=4 , _snake_case : Optional[int]=[10, 20, 30, 40] , _snake_case : List[str]=[2, 2, 3, 2] , _snake_case : List[Any]=True , _snake_case : str=True , _snake_case : Any=37 , _snake_case : Any="gelu" , _snake_case : Optional[Any]=10 , _snake_case : int=0.0_2 , _snake_case : int=["stage2", "stage3", "stage4"] , _snake_case : Union[str, Any]=3 , _snake_case : Dict=None , ): """simple docstring""" UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = image_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = num_stages UpperCAmelCase_ = hidden_sizes UpperCAmelCase_ = depths UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = out_features UpperCAmelCase_ = num_labels UpperCAmelCase_ = scope UpperCAmelCase_ = num_stages def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) UpperCAmelCase_ = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self : int): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def lowerCamelCase ( self : List[Any]): """simple docstring""" return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_snake_case , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_snake_case , loss_ignore_index=255 , num_labels=self.num_labels , ) def lowerCamelCase ( self : List[Any] , _snake_case : Tuple , _snake_case : List[Any] , _snake_case : Optional[Any]): """simple docstring""" UpperCAmelCase_ = UperNetForSemanticSegmentation(config=_snake_case) model.to(_snake_case) model.eval() UpperCAmelCase_ = model(_snake_case) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size)) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( 
UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __snake_case ( a , a , unittest.TestCase ): UpperCAmelCase__ : List[str] = (UperNetForSemanticSegmentation,) if is_torch_available() else () UpperCAmelCase__ : Any = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {} UpperCAmelCase__ : str = False UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Union[str, Any] = False UpperCAmelCase__ : Dict = False UpperCAmelCase__ : Tuple = False def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = UperNetModelTester(self) UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37) def lowerCamelCase ( self : str): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" return def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(_snake_case) UpperCAmelCase_ = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _snake_case) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_snake_case) @unittest.skip(reason='''UperNet does not use inputs_embeds''') def lowerCamelCase ( self : Optional[int]): """simple docstring""" pass @unittest.skip(reason='''UperNet does not support input and output embeddings''') def lowerCamelCase ( self : Any): """simple docstring""" pass @unittest.skip(reason='''UperNet does not have a base model''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" pass @unittest.skip(reason='''UperNet does not have a base model''') def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''') def lowerCamelCase ( self : Optional[int]): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''') def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" pass def lowerCamelCase ( self : int): """simple docstring""" def check_hidden_states_output(_snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : int): UpperCAmelCase_ = model_class(_snake_case) model.to(_snake_case) model.eval() with torch.no_grad(): UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case)) UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ = self.model_tester.num_stages 
self.assertEqual(len(_snake_case) , expected_num_stages + 1) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case) def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = _config_zero_init(_snake_case) UpperCAmelCase_ = _config_zero_init(configs_no_init.backbone_config) for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(config=_snake_case) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip(reason='''UperNet does not have tied weights''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" pass @slow def lowerCamelCase ( self : Optional[int]): """simple docstring""" for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ = UperNetForSemanticSegmentation.from_pretrained(_snake_case) self.assertIsNotNone(_snake_case) def A () -> str: """simple docstring""" UpperCAmelCase_ = hf_hub_download( repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' ) UpperCAmelCase_ = Image.open(__A ).convert('''RGB''' ) return image @require_torch @require_vision @slow class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''') UpperCAmelCase_ = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''').to(_snake_case) UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = processor(images=_snake_case , return_tensors='''pt''').to(_snake_case) with torch.no_grad(): UpperCAmelCase_ = model(**_snake_case) UpperCAmelCase_ = torch.Size((1, model.config.num_labels, 512, 512)) self.assertEqual(outputs.logits.shape , _snake_case) UpperCAmelCase_ = torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]]).to(_snake_case) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _snake_case , atol=1e-4)) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''') UpperCAmelCase_ = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''').to(_snake_case) UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = processor(images=_snake_case , return_tensors='''pt''').to(_snake_case) with torch.no_grad(): UpperCAmelCase_ = model(**_snake_case) UpperCAmelCase_ = torch.Size((1, model.config.num_labels, 512, 512)) self.assertEqual(outputs.logits.shape , _snake_case) UpperCAmelCase_ = torch.tensor( [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, 
-8.6_1_3_0]]).to(_snake_case) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _snake_case , atol=1e-4))
359
import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py snake_case_ : int = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. snake_case_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS) snake_case_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING snake_case_ : Union[str, Any] = { # used to compute the property `self.chunk_length` "EncodecConfig": ["overlap"], # used as `self.bert_model = BertModel(config, ...)` "DPRConfig": True, # not used in modeling files, but it's an important information "FSMTConfig": ["langs"], # used internally in the configuration class file "GPTNeoConfig": ["attention_types"], # used internally in the configuration class file "EsmConfig": ["is_folding_model"], # used during training (despite we don't have training script for these models yet) "Mask2FormerConfig": ["ignore_value"], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) "OneFormerConfig": ["ignore_value", "norm"], # used during preprocessing and collation, see `collating_graphormer.py` "GraphormerConfig": ["spatial_pos_max"], # used internally in the configuration class file "T5Config": ["feed_forward_proj"], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally "MT5Config": ["feed_forward_proj", "tokenizer_class"], "UMT5Config": ["feed_forward_proj", "tokenizer_class"], # used internally in the configuration class file "LongT5Config": ["feed_forward_proj"], # used internally in the configuration class file "SwitchTransformersConfig": ["feed_forward_proj"], # having default values other than `1e-5` - we can't fix them without breaking "BioGptConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "GLPNConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "SegformerConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "CvtConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "PerceiverConfig": ["layer_norm_eps"], # used internally to calculate the feature size "InformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate the feature size "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate the feature size "AutoformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate `mlp_dim` "SamVisionConfig": ["mlp_ratio"], # For (head) training, but so far not implemented "ClapAudioConfig": ["num_classes"], # Not used, but providing useful information to users "SpeechT5HifiGanConfig": ["sampling_rate"], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { "CLIPSegConfig": True, "DeformableDetrConfig": True, "DetaConfig": True, "DinatConfig": True, "DonutSwinConfig": True, "EfficientFormerConfig": True, "FSMTConfig": True, "JukeboxConfig": True, 
"LayoutLMv2Config": True, "MaskFormerSwinConfig": True, "MT5Config": True, "NatConfig": True, "OneFormerConfig": True, "PerceiverConfig": True, "RagConfig": True, "SpeechT5Config": True, "SwinConfig": True, "Swin2SRConfig": True, "Swinv2Config": True, "SwitchTransformersConfig": True, "TableTransformerConfig": True, "TapasConfig": True, "TransfoXLConfig": True, "UniSpeechConfig": True, "UniSpeechSatConfig": True, "WavLMConfig": True, "WhisperConfig": True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) "JukeboxPriorConfig": True, # TODO: @Younes (for `is_decoder`) "Pix2StructTextConfig": True, } ) def A (__A : List[Any] , __A : Optional[int] , __A : str , __A : Dict ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( F"""config.{attribute}""" in modeling_source or F"""getattr(config, \"{attribute}\"""" in modeling_source or F"""getattr(self.config, \"{attribute}\"""" in modeling_source ): UpperCAmelCase_ = True # Deal with multi-line cases elif ( re.search( RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __A , ) is not None ): UpperCAmelCase_ = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: UpperCAmelCase_ = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files UpperCAmelCase_ = [ '''bos_index''', '''eos_index''', '''pad_index''', '''unk_index''', '''mask_index''', '''image_size''', '''use_cache''', '''out_features''', '''out_indices''', ] UpperCAmelCase_ = ['''encoder_no_repeat_ngram_size'''] # Special cases to be allowed UpperCAmelCase_ = True if not attribute_used: UpperCAmelCase_ = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: UpperCAmelCase_ = True elif attribute in ["tie_word_embeddings"] and default_value is False: UpperCAmelCase_ = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: UpperCAmelCase_ = True elif attribute.endswith('''_token_id''' ): UpperCAmelCase_ = True # configuration class specific cases if not case_allowed: UpperCAmelCase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) UpperCAmelCase_ = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def A (__A : Tuple ) -> List[Any]: """simple docstring""" UpperCAmelCase_ = dict(inspect.signature(config_class.__init__ ).parameters ) UpperCAmelCase_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']] UpperCAmelCase_ = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass UpperCAmelCase_ = {} if len(config_class.attribute_map ) > 0: UpperCAmelCase_ = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files UpperCAmelCase_ = inspect.getsourcefile(__A ) 
UpperCAmelCase_ = os.path.dirname(__A ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. UpperCAmelCase_ = [os.path.join(__A , __A ) for fn in os.listdir(__A ) if fn.startswith('''modeling_''' )] # Get the source code strings UpperCAmelCase_ = [] for path in modeling_paths: if os.path.isfile(__A ): with open(__A ) as fp: modeling_sources.append(fp.read() ) UpperCAmelCase_ = [] for config_param, default_value in zip(__A , __A ): # `attributes` here is all the variant names for `config_param` UpperCAmelCase_ = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(__A , __A , __A , __A ): unused_attributes.append(attributes[0] ) return sorted(__A ) def A () -> Any: """simple docstring""" UpperCAmelCase_ = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) UpperCAmelCase_ = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda __A : inspect.isclass(__A ) and issubclass(__A , __A ) and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: UpperCAmelCase_ = check_config_attributes_being_used(__A ) if len(__A ) > 0: UpperCAmelCase_ = unused_attributes if len(__A ) > 0: UpperCAmelCase_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n''' for name, attributes in configs_with_unused_attributes.items(): error += F"""{name}: {attributes}\n""" raise ValueError(__A ) if __name__ == "__main__": check_config_attributes()
7
0
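The configuration-attribute checker in this record scans modeling sources for `config.xxx` and `getattr(config, "xxx", ...)` patterns, including multi-line getattr calls. A reduced sketch of that heuristic is shown below; the helper name is hypothetical and the real script handles many more cases.

import re

def attribute_used_in_source(attribute: str, source: str) -> bool:
    # Fast path: the literal one-line patterns.
    if f"config.{attribute}" in source or f'getattr(config, "{attribute}"' in source:
        return True
    # Multi-line getattr calls, mirroring the regex fallback in the script above.
    pattern = rf'getattr[ \t\n]*\([ \t\n]*(self\.)?config,[ \t\n]*"{attribute}"'
    return re.search(pattern, source) is not None

assert attribute_used_in_source("hidden_size", "x = config.hidden_size")
assert not attribute_used_in_source("unused_attr", "x = config.hidden_size")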
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
    from transformers.models.ta.modeling_flax_ta import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __snake_case ( unittest.TestCase ):

    @slow
    def lowerCamelCase ( self : int):
        """simple docstring"""
        UpperCAmelCase_ = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''')
        UpperCAmelCase_ = AutoTokenizer.from_pretrained('''google/mt5-small''')

        UpperCAmelCase_ = tokenizer('''Hello there''' , return_tensors='''np''').input_ids
        UpperCAmelCase_ = tokenizer('''Hi I am''' , return_tensors='''np''').input_ids

        UpperCAmelCase_ = shift_tokens_right(_snake_case , model.config.pad_token_id , model.config.decoder_start_token_id)

        UpperCAmelCase_ = model(_snake_case , decoder_input_ids=_snake_case).logits
        UpperCAmelCase_ = optax.softmax_cross_entropy(_snake_case , onehot(_snake_case , logits.shape[-1])).mean()

        UpperCAmelCase_ = -(labels.shape[-1] * loss.item())
        UpperCAmelCase_ = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
360
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class __snake_case ( a , unittest.TestCase ):
    UpperCAmelCase__ : Optional[Any] = FlaxAutoencoderKL

    @property
    def lowerCamelCase ( self : List[str]):
        """simple docstring"""
        UpperCAmelCase_ = 4
        UpperCAmelCase_ = 3
        UpperCAmelCase_ = (32, 32)

        UpperCAmelCase_ = jax.random.PRNGKey(0)
        UpperCAmelCase_ = jax.random.uniform(_snake_case , ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def lowerCamelCase ( self : Optional[Any]):
        """simple docstring"""
        UpperCAmelCase_ = {
            '''block_out_channels''': [32, 64],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        UpperCAmelCase_ = self.dummy_input
        return init_dict, inputs_dict
7
0
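The Flax MT5 row in this record builds decoder inputs by shifting the labels one position to the right. A plain-numpy sketch of that operation follows, mirroring the semantics of the Flax T5 helper as I understand them; the -100 placeholder handling is an assumption carried over from the usual Hugging Face label convention.

import numpy as np

def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    # Shift every row one position to the right and prepend the start token.
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # Replace any -100 placeholder labels with the pad token.
    return np.where(shifted == -100, pad_token_id, shifted)

labels = np.array([[5, 6, 7, 1]])
assert shift_tokens_right(labels, 0, 2).tolist() == [[2, 5, 6, 7]]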
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


snake_case_ : List[Any] = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : Optional[int] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : List[str] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : List[str] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    snake_case_ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
361
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 snake_case_ : List[str] = { "return_dict": False, "output_hidden_states": True, "output_attentions": True, "torchscript": True, "torch_dtype": "float16", "use_bfloat16": True, "tf_legacy_loss": True, "pruned_heads": {"a": 1}, "tie_word_embeddings": False, "is_decoder": True, "cross_attention_hidden_size": 128, "add_cross_attention": True, "tie_encoder_decoder": True, "max_length": 50, "min_length": 3, "do_sample": True, "early_stopping": True, "num_beams": 3, "num_beam_groups": 3, "diversity_penalty": 0.5, "temperature": 2.0, "top_k": 10, "top_p": 0.7, "typical_p": 0.2, "repetition_penalty": 0.8, "length_penalty": 0.8, "no_repeat_ngram_size": 5, "encoder_no_repeat_ngram_size": 5, "bad_words_ids": [1, 2, 3], "num_return_sequences": 3, "chunk_size_feed_forward": 5, "output_scores": True, "return_dict_in_generate": True, "forced_bos_token_id": 2, "forced_eos_token_id": 3, "remove_invalid_values": True, "architectures": ["BertModel"], "finetuning_task": "translation", "id2label": {0: "label"}, "label2id": {"label": "0"}, "tokenizer_class": "BertTokenizerFast", "prefix": "prefix", "bos_token_id": 6, "pad_token_id": 7, "eos_token_id": 8, "sep_token_id": 9, "decoder_start_token_id": 10, "exponential_decay_length_penalty": (5, 1.01), "suppress_tokens": [0, 1], "begin_suppress_tokens": 2, "task_specific_params": {"translation": "some_params"}, "problem_type": "regression", } @is_staging_test class __snake_case ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls : Optional[Any]): """simple docstring""" UpperCAmelCase_ = TOKEN HfFolder.save_token(_snake_case) @classmethod def lowerCamelCase ( cls : List[str]): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-config''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-config''') except HTTPError: pass def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37) config.push_to_hub('''test-config''' , use_auth_token=self._token) UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) # Reset repo delete_repo(token=self._token , repo_id='''test-config''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_snake_case , repo_id='''test-config''' , push_to_hub=_snake_case , use_auth_token=self._token) UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = BertConfig( vocab_size=99 , 
hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37) config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token) UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-config-org''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( _snake_case , repo_id='''valid_org/test-config-org''' , push_to_hub=_snake_case , use_auth_token=self._token) UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" CustomConfig.register_for_auto_class() UpperCAmelCase_ = CustomConfig(attribute=42) config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''}) UpperCAmelCase_ = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=_snake_case) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''') self.assertEqual(new_config.attribute , 42) class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated UpperCAmelCase_ = c.n_embd + 1 # int UpperCAmelCase_ = c.resid_pdrop + 1.0 # float UpperCAmelCase_ = not c.scale_attn_weights # bool UpperCAmelCase_ = c.summary_type + '''foo''' # str c.update_from_string( F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""") self.assertEqual(_snake_case , c.n_embd , '''mismatch for key: n_embd''') self.assertEqual(_snake_case , c.resid_pdrop , '''mismatch for key: resid_pdrop''') self.assertEqual(_snake_case , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''') self.assertEqual(_snake_case , c.summary_type , '''mismatch for key: summary_type''') def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = PretrainedConfig() UpperCAmelCase_ = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( _snake_case , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version''']) UpperCAmelCase_ = [key for key, value in config_common_kwargs.items() if value == getattr(_snake_case , _snake_case)] if len(_snake_case) > 0: raise ValueError( '''The following keys are set with the default values in''' ''' `test_configuration_common.config_common_kwargs` pick another value for them:''' F""" {", ".join(_snake_case)}.""") def lowerCamelCase ( self : str): """simple docstring""" with self.assertRaises(_snake_case): # config is in subfolder, the following should not work without specifying the subfolder UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''') UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''') self.assertIsNotNone(_snake_case) def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = mock.Mock() UpperCAmelCase_ = 500 UpperCAmelCase_ = {} UpperCAmelCase_ = HTTPError UpperCAmelCase_ = {} # Download this model to make sure it's in the cache. UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''') # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_snake_case) as mock_head: UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''') # This check we did call the fake head request mock_head.assert_called() def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = BertConfig.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''') def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = AutoConfig.from_pretrained('''bert-base-cased''') UpperCAmelCase_ = ['''config.4.0.0.json'''] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(_snake_case) UpperCAmelCase_ = 2 json.dump(configuration.to_dict() , open(os.path.join(_snake_case , '''config.4.0.0.json''') , '''w''')) # This should pick the new configuration file as the version of Transformers is > 4.0.0 UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) self.assertEqual(new_configuration.hidden_size , 2) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 UpperCAmelCase_ = ['''config.42.0.0.json'''] UpperCAmelCase_ = 768 configuration.save_pretrained(_snake_case) shutil.move(os.path.join(_snake_case , '''config.4.0.0.json''') , os.path.join(_snake_case , '''config.42.0.0.json''')) UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) self.assertEqual(new_configuration.hidden_size , 768) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''hf-internal-testing/test-two-configs''' import transformers as new_transformers UpperCAmelCase_ = '''v4.0.0''' UpperCAmelCase_ , UpperCAmelCase_ = new_transformers.models.auto.AutoConfig.from_pretrained( _snake_case , return_unused_kwargs=_snake_case) self.assertEqual(new_configuration.hidden_size , 2) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(_snake_case , {}) # Testing an older version by monkey-patching the version in the module it's used. 
import transformers as old_transformers UpperCAmelCase_ = '''v3.0.0''' UpperCAmelCase_ = old_transformers.models.auto.AutoConfig.from_pretrained(_snake_case) self.assertEqual(old_configuration.hidden_size , 768)
7
0
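The ResNet __init__ row in this record defers heavy framework imports through transformers' _LazyModule. For orientation, here is a simplified stand-in that shows the underlying pattern; this class is illustrative only and omits most of what the real _LazyModule does.

import importlib
import types

class LazyModule(types.ModuleType):
    """Simplified sketch of a lazy module: submodules are imported on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_submodule = {
            attr: sub for sub, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._attr_to_submodule[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip the import machinery
        return value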
"""simple docstring""" self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006) self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022) self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076) self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.tokenizer.get_vocab() self.assertEqual(len(_snake_case) , self.tokenizer.vocab_size) self.assertEqual(vocab['''<unk>'''] , 3) self.assertIn(self.tokenizer.get_lang_token('''en''') , _snake_case) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''en''' UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , _snake_case) def lowerCamelCase ( self : Any): """simple docstring""" self.assertIn(_snake_case , self.tokenizer.all_special_ids) # fmt: off UpperCAmelCase_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: on UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case) UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case) self.assertEqual(_snake_case , _snake_case) self.assertNotIn(self.tokenizer.eos_token , _snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(_snake_case) UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(_snake_case) self.assertDictEqual(new_tok.lang_token_to_id , _snake_case) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = '''en''' UpperCAmelCase_ = '''fr''' UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''') UpperCAmelCase_ = shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id) for k in batch: UpperCAmelCase_ = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''mr''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) UpperCAmelCase_ = '''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) @require_torch def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) UpperCAmelCase_ = '''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')]) 
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''') self.assertEqual( nested_simplify(_snake_case) , { # en_XX, A, test, EOS '''input_ids''': [[128022, 58, 4183, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 128006, } , )
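# A short usage sketch for the translation inputs exercised above, following the
# documented M2M100 pattern; the model object is a hypothetical
# M2M100ForConditionalGeneration loaded from the same checkpoint:
#
#   tokenizer.src_lang = "en"
#   encoded = tokenizer("A test", return_tensors="pt")
#   generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("ar"))
#   print(tokenizer.batch_decode(generated, skip_special_tokens=True))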
362
from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass snake_case_ : List[Any] = (3, 9, -11, 0, 7, 5, 1, -1) snake_case_ : str = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class __snake_case : UpperCAmelCase__ : int UpperCAmelCase__ : Node | None class __snake_case : def __init__( self : Optional[int] , _snake_case : Iterable[int]): """simple docstring""" UpperCAmelCase_ = None for i in sorted(_snake_case , reverse=_snake_case): UpperCAmelCase_ = Node(_snake_case , self.head) def __iter__( self : Dict): """simple docstring""" UpperCAmelCase_ = self.head while node: yield node.data UpperCAmelCase_ = node.next_node def __len__( self : int): """simple docstring""" return sum(1 for _ in self) def __str__( self : Optional[Any]): """simple docstring""" return " -> ".join([str(_snake_case) for node in self]) def A (__A : SortedLinkedList , __A : SortedLinkedList ) -> SortedLinkedList: """simple docstring""" return SortedLinkedList(list(__A ) + list(__A ) ) if __name__ == "__main__": import doctest doctest.testmod() snake_case_ : Union[str, Any] = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
7
0
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class __snake_case : def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : List[str]=13 , _snake_case : Any=7 , _snake_case : List[Any]=True , _snake_case : Any=True , _snake_case : Tuple=True , _snake_case : Any=True , _snake_case : int=99 , _snake_case : Dict=32 , _snake_case : Optional[int]=2 , _snake_case : Optional[Any]=4 , _snake_case : str=37 , _snake_case : Dict="gelu" , _snake_case : List[str]=0.1 , _snake_case : Any=0.1 , _snake_case : Optional[int]=512 , _snake_case : str=16 , _snake_case : Union[str, Any]=2 , _snake_case : Optional[Any]=0.0_2 , _snake_case : int=3 , _snake_case : List[str]=4 , _snake_case : Any=None , ): """simple docstring""" UpperCAmelCase_ = parent UpperCAmelCase_ = 13 UpperCAmelCase_ = 7 UpperCAmelCase_ = True UpperCAmelCase_ = True UpperCAmelCase_ = True UpperCAmelCase_ = True UpperCAmelCase_ = 99 UpperCAmelCase_ = 384 UpperCAmelCase_ = 2 UpperCAmelCase_ = 4 UpperCAmelCase_ = 37 UpperCAmelCase_ = '''gelu''' UpperCAmelCase_ = 0.1 UpperCAmelCase_ = 0.1 UpperCAmelCase_ = 512 UpperCAmelCase_ = 16 UpperCAmelCase_ = 2 UpperCAmelCase_ = 0.0_2 UpperCAmelCase_ = 3 UpperCAmelCase_ = 4 UpperCAmelCase_ = 128 UpperCAmelCase_ = 2 UpperCAmelCase_ = 9 UpperCAmelCase_ = 1 UpperCAmelCase_ = None def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) UpperCAmelCase_ = None if self.use_input_mask: UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length]) UpperCAmelCase_ = None if self.use_token_type_ids: UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices) UpperCAmelCase_ = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_snake_case , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self : str , _snake_case : Any , _snake_case : Dict , _snake_case : str , _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Tuple): """simple docstring""" UpperCAmelCase_ = TFConvBertModel(config=_snake_case) UpperCAmelCase_ = 
{'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase_ = [input_ids, input_mask] UpperCAmelCase_ = model(_snake_case) UpperCAmelCase_ = model(_snake_case) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowerCamelCase ( self : Dict , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Dict , _snake_case : Dict , _snake_case : str , _snake_case : Optional[int] , _snake_case : str): """simple docstring""" UpperCAmelCase_ = TFConvBertForMaskedLM(config=_snake_case) UpperCAmelCase_ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase_ = model(_snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowerCamelCase ( self : Any , _snake_case : int , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : int , _snake_case : List[str] , _snake_case : str , _snake_case : Optional[int]): """simple docstring""" UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = TFConvBertForSequenceClassification(config=_snake_case) UpperCAmelCase_ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase_ = model(_snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def lowerCamelCase ( self : Union[str, Any] , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : List[str] , _snake_case : Tuple): """simple docstring""" UpperCAmelCase_ = self.num_choices UpperCAmelCase_ = TFConvBertForMultipleChoice(config=_snake_case) UpperCAmelCase_ = tf.tile(tf.expand_dims(_snake_case , 1) , (1, self.num_choices, 1)) UpperCAmelCase_ = tf.tile(tf.expand_dims(_snake_case , 1) , (1, self.num_choices, 1)) UpperCAmelCase_ = tf.tile(tf.expand_dims(_snake_case , 1) , (1, self.num_choices, 1)) UpperCAmelCase_ = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } UpperCAmelCase_ = model(_snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def lowerCamelCase ( self : Tuple , _snake_case : Any , _snake_case : str , _snake_case : List[str] , _snake_case : int , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Optional[Any]): """simple docstring""" UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = TFConvBertForTokenClassification(config=_snake_case) UpperCAmelCase_ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase_ = model(_snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def lowerCamelCase ( self : Tuple , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : Dict , _snake_case : List[Any] , _snake_case : str , _snake_case : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = TFConvBertForQuestionAnswering(config=_snake_case) UpperCAmelCase_ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase_ = model(_snake_case) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape 
, (self.batch_size, self.seq_length)) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __snake_case ( a , a , unittest.TestCase ): UpperCAmelCase__ : List[Any] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) UpperCAmelCase__ : List[Any] = ( { '''feature-extraction''': TFConvBertModel, '''fill-mask''': TFConvBertForMaskedLM, '''question-answering''': TFConvBertForQuestionAnswering, '''text-classification''': TFConvBertForSequenceClassification, '''token-classification''': TFConvBertForTokenClassification, '''zero-shot''': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase__ : Tuple = False UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : List[str] = False def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = TFConvBertModelTester(self) UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , hidden_size=37) def lowerCamelCase ( self : List[Any]): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case) def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_snake_case) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_snake_case) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_snake_case) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_snake_case) @slow def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = True UpperCAmelCase_ = True if hasattr(_snake_case , '''use_cache'''): UpperCAmelCase_ = True UpperCAmelCase_ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length) UpperCAmelCase_ = getattr(self.model_tester , '''key_length''' , _snake_case) for model_class in self.all_model_classes: UpperCAmelCase_ = self._prepare_for_class(_snake_case , _snake_case) UpperCAmelCase_ = model_class(_snake_case) UpperCAmelCase_ = len(model(_snake_case)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_snake_case , saved_model=_snake_case) UpperCAmelCase_ = os.path.join(_snake_case , 
'''saved_model''' , '''1''') UpperCAmelCase_ = tf.keras.models.load_model(_snake_case) UpperCAmelCase_ = model(_snake_case) if self.is_encoder_decoder: UpperCAmelCase_ = outputs['''encoder_hidden_states'''] UpperCAmelCase_ = outputs['''encoder_attentions'''] else: UpperCAmelCase_ = outputs['''hidden_states'''] UpperCAmelCase_ = outputs['''attentions'''] self.assertEqual(len(_snake_case) , _snake_case) UpperCAmelCase_ = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(_snake_case) , _snake_case) self.assertListEqual( list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_snake_case) , self.model_tester.num_hidden_layers) self.assertListEqual( list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''') self.assertIsNotNone(_snake_case) def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = True UpperCAmelCase_ = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length) UpperCAmelCase_ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length) UpperCAmelCase_ = getattr(self.model_tester , '''key_length''' , _snake_case) UpperCAmelCase_ = getattr(self.model_tester , '''key_length''' , _snake_case) def check_decoder_attentions_output(_snake_case : List[str]): UpperCAmelCase_ = len(_snake_case) self.assertEqual(out_len % 2 , 0) UpperCAmelCase_ = outputs.decoder_attentions self.assertEqual(len(_snake_case) , self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_snake_case : Optional[Any]): UpperCAmelCase_ = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_snake_case) , self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: UpperCAmelCase_ = True UpperCAmelCase_ = False UpperCAmelCase_ = model_class(_snake_case) UpperCAmelCase_ = model(self._prepare_for_class(_snake_case , _snake_case)) UpperCAmelCase_ = len(_snake_case) self.assertEqual(config.output_hidden_states , _snake_case) check_encoder_attentions_output(_snake_case) if self.is_encoder_decoder: UpperCAmelCase_ = model_class(_snake_case) UpperCAmelCase_ = model(self._prepare_for_class(_snake_case , _snake_case)) self.assertEqual(config.output_hidden_states , _snake_case) check_decoder_attentions_output(_snake_case) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] UpperCAmelCase_ = True UpperCAmelCase_ = model_class(_snake_case) UpperCAmelCase_ = model(self._prepare_for_class(_snake_case , _snake_case)) self.assertEqual(config.output_hidden_states , _snake_case) check_encoder_attentions_output(_snake_case) # Check attention is always last and order is fine UpperCAmelCase_ = True UpperCAmelCase_ = True UpperCAmelCase_ = model_class(_snake_case) 
UpperCAmelCase_ = model(self._prepare_for_class(_snake_case , _snake_case)) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_snake_case)) self.assertEqual(model.config.output_hidden_states , _snake_case) check_encoder_attentions_output(_snake_case) @require_tf class __snake_case ( unittest.TestCase ): @slow def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''') UpperCAmelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]]) UpperCAmelCase_ = model(_snake_case)[0] UpperCAmelCase_ = [1, 6, 768] self.assertEqual(output.shape , _snake_case) UpperCAmelCase_ = tf.constant( [ [ [-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2], [0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4], [0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4], ] ]) tf.debugging.assert_near(output[:, :3, :3] , _snake_case , atol=1e-4)
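# Note on the `/ 2` in the attention-shape asserts above: ConvBERT splits its
# heads between self-attention and span-based dynamic convolution via a
# `head_ratio` of 2, so only half of `num_attention_heads` shows up in the
# returned attention maps (stated from the ConvBERT design; treat the exact
# config field name as an assumption).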
363
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig snake_case_ : Union[str, Any] = logging.get_logger(__name__) class __snake_case : def __init__( self : int , _snake_case : List[Any] , _snake_case : Tuple): """simple docstring""" UpperCAmelCase_ = question_encoder UpperCAmelCase_ = generator UpperCAmelCase_ = self.question_encoder def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int]): """simple docstring""" if os.path.isfile(_snake_case): raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""") os.makedirs(_snake_case , exist_ok=_snake_case) UpperCAmelCase_ = os.path.join(_snake_case , '''question_encoder_tokenizer''') UpperCAmelCase_ = os.path.join(_snake_case , '''generator_tokenizer''') self.question_encoder.save_pretrained(_snake_case) self.generator.save_pretrained(_snake_case) @classmethod def lowerCamelCase ( cls : Optional[Any] , _snake_case : Optional[Any] , **_snake_case : Optional[int]): """simple docstring""" from ..auto.tokenization_auto import AutoTokenizer UpperCAmelCase_ = kwargs.pop('''config''' , _snake_case) if config is None: UpperCAmelCase_ = RagConfig.from_pretrained(_snake_case) UpperCAmelCase_ = AutoTokenizer.from_pretrained( _snake_case , config=config.question_encoder , subfolder='''question_encoder_tokenizer''') UpperCAmelCase_ = AutoTokenizer.from_pretrained( _snake_case , config=config.generator , subfolder='''generator_tokenizer''') return cls(question_encoder=_snake_case , generator=_snake_case) def __call__( self : List[Any] , *_snake_case : List[str] , **_snake_case : List[Any]): """simple docstring""" return self.current_tokenizer(*_snake_case , **_snake_case) def lowerCamelCase ( self : List[Any] , *_snake_case : str , **_snake_case : Union[str, Any]): """simple docstring""" return self.generator.batch_decode(*_snake_case , **_snake_case) def lowerCamelCase ( self : str , *_snake_case : Optional[int] , **_snake_case : Any): """simple docstring""" return self.generator.decode(*_snake_case , **_snake_case) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = self.question_encoder def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = self.generator def lowerCamelCase ( self : Optional[Any] , _snake_case : List[str] , _snake_case : Optional[List[str]] = None , _snake_case : Optional[int] = None , _snake_case : Optional[int] = None , _snake_case : str = "longest" , _snake_case : str = None , _snake_case : bool = True , **_snake_case : Optional[int] , ): """simple docstring""" warnings.warn( '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ''' '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ''' '''context manager to prepare your targets. 
See the documentation of your specific tokenizer for more ''' '''details''' , _snake_case , ) if max_length is None: UpperCAmelCase_ = self.current_tokenizer.model_max_length UpperCAmelCase_ = self( _snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , max_length=_snake_case , padding=_snake_case , truncation=_snake_case , **_snake_case , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: UpperCAmelCase_ = self.current_tokenizer.model_max_length UpperCAmelCase_ = self( text_target=_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , **_snake_case , ) UpperCAmelCase_ = labels['''input_ids'''] return model_inputs
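# The deprecation warning above points callers at plain `__call__` with
# `text_target`. A minimal sketch of that replacement pattern (checkpoint name
# assumed for illustration):
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#   batch = tokenizer(src_texts, text_target=tgt_texts, padding="longest", return_tensors="pt")
#   batch["labels"]  # target ids produced by the `text_target` pass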
7
0
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def A () -> Optional[int]: """simple docstring""" UpperCAmelCase_ = ArgumentParser( description=( '''PyTorch TPU distributed training launch ''' '''helper utility that will spawn up ''' '''multiple distributed processes''' ) ) # Optional arguments for the launch helper parser.add_argument('''--num_cores''' , type=__A , default=1 , help='''Number of TPU cores to use (1 or 8).''' ) # positional parser.add_argument( '''training_script''' , type=__A , help=( '''The full path to the single TPU training ''' '''program/script to be launched in parallel, ''' '''followed by all the arguments for the ''' '''training script''' ) , ) # rest from the training program parser.add_argument('''training_script_args''' , nargs=__A ) return parser.parse_args() def A () -> Optional[Any]: """simple docstring""" UpperCAmelCase_ = parse_args() # Import training_script as a module. UpperCAmelCase_ = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) UpperCAmelCase_ = script_fpath.stem UpperCAmelCase_ = importlib.import_module(__A ) # Patch sys.argv UpperCAmelCase_ = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
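# Example invocation of the launcher above (script path hypothetical); it plays
# the role `python -m torch.distributed.launch` plays on GPU, but spawns TPU
# workers through `xmp.spawn`:
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...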
364
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __snake_case ( unittest.TestCase ): @slow def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-base''') UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) # The dog is cute and lives in the garden house UpperCAmelCase_ = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim UpperCAmelCase_ = torch.tensor( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach() self.assertEqual(output.shape , _snake_case) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3)) @slow def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-large''') UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) # The dog is cute and lives in the garden house UpperCAmelCase_ = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim UpperCAmelCase_ = torch.tensor( [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach() self.assertEqual(output.shape , _snake_case) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3))
7
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) snake_case_ : Any = { "configuration_xlm_roberta": [ "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaConfig", "XLMRobertaOnnxConfig", ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : List[str] = ["XLMRobertaTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Dict = ["XLMRobertaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Optional[int] = [ "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMRobertaForCausalLM", "XLMRobertaForMaskedLM", "XLMRobertaForMultipleChoice", "XLMRobertaForQuestionAnswering", "XLMRobertaForSequenceClassification", "XLMRobertaForTokenClassification", "XLMRobertaModel", "XLMRobertaPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Tuple = [ "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMRobertaForCausalLM", "TFXLMRobertaForMaskedLM", "TFXLMRobertaForMultipleChoice", "TFXLMRobertaForQuestionAnswering", "TFXLMRobertaForSequenceClassification", "TFXLMRobertaForTokenClassification", "TFXLMRobertaModel", "TFXLMRobertaPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Dict = [ "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxXLMRobertaForMaskedLM", "FlaxXLMRobertaForCausalLM", "FlaxXLMRobertaForMultipleChoice", "FlaxXLMRobertaForQuestionAnswering", "FlaxXLMRobertaForSequenceClassification", "FlaxXLMRobertaForTokenClassification", "FlaxXLMRobertaModel", "FlaxXLMRobertaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys snake_case_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
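# Note on the pattern above: `_LazyModule` defers the heavy framework imports
# (torch, tensorflow, flax) until an attribute is first accessed, so importing
# the package stays cheap even when all three backends are installed; the
# `try/except OptionalDependencyNotAvailable` blocks keep the import structure
# consistent when a backend is missing.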
365
from maths.prime_factors import prime_factors


def A (__A : int ) -> int:
    """Return the Liouville lambda of a positive integer: 1 if it has an even
    number of prime factors (counted with multiplicity), -1 otherwise."""
    if not isinstance(__A , __A ):
        UpperCAmelCase_ = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(__A )
    if number < 1:
        raise ValueError('''Input must be a positive integer''' )
    return -1 if len(prime_factors(__A ) ) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
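# A self-contained reference for the helper above (a sketch, assuming
# `maths.prime_factors.prime_factors` returns prime factors *with multiplicity*,
# which makes the function the Liouville lambda). Useful for sanity checks
# without the `maths` package on the path:
def _liouville_reference(n: int) -> int:
    count, d = 0, 2
    while d * d <= n:
        while n % d == 0:  # strip every power of d, counting multiplicity
            n //= d
            count += 1
        d += 1
    if n > 1:  # leftover factor is prime
        count += 1
    return -1 if count % 2 else 1


assert _liouville_reference(10) == 1   # 10 = 2 * 5 -> even count of prime factors
assert _liouville_reference(11) == -1  # prime -> odd count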
7
0
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __snake_case ( unittest.TestCase ): @slow def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-base''') UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) # The dog is cute and lives in the garden house UpperCAmelCase_ = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim UpperCAmelCase_ = torch.tensor( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach() self.assertEqual(output.shape , _snake_case) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3)) @slow def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-large''') UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) # The dog is cute and lives in the garden house UpperCAmelCase_ = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim UpperCAmelCase_ = torch.tensor( [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach() self.assertEqual(output.shape , _snake_case) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3))
366
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any]): """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']): UpperCAmelCase_ = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_snake_case) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''') def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) # set architectures equal to `None` UpperCAmelCase_ = None UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Dict): """simple 
docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tinier_bart''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tinier_bart''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ = PyTorchBenchmarkArguments( 
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) benchmark.run() self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists()) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_snake_case : Tuple): self.assertTrue(hasattr(_snake_case , '''sequential''')) self.assertTrue(hasattr(_snake_case , '''cumulative''')) self.assertTrue(hasattr(_snake_case , '''current''')) self.assertTrue(hasattr(_snake_case , '''total''')) with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() _check_summary_is_not_empty(result.inference_summary) _check_summary_is_not_empty(result.train_summary) self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
7
0
import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = torch.nn.Linear(10 , 10) UpperCAmelCase_ = torch.optim.SGD(model.parameters() , 0.1) UpperCAmelCase_ = Accelerator() UpperCAmelCase_ = accelerator.prepare(_snake_case) try: pickle.loads(pickle.dumps(_snake_case)) except Exception as e: self.fail(F"""Accelerated optimizer pickling failed with {e}""") AcceleratorState._reset_state()
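# Note on the test above: `accelerator.prepare` returns the optimizer wrapped in
# accelerate's optimizer wrapper (named `AcceleratedOptimizer` in the versions
# this sketch assumes), and the `pickle.dumps`/`pickle.loads` round trip guards
# against that wrapper breaking pickling, which checkpointing relies on.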
367
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def A (__A : BertModel , __A : str , __A : str ) -> int: """simple docstring""" UpperCAmelCase_ = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''') UpperCAmelCase_ = ( ('''layer.''', '''layer_'''), ('''word_embeddings.weight''', '''word_embeddings'''), ('''position_embeddings.weight''', '''position_embeddings'''), ('''token_type_embeddings.weight''', '''token_type_embeddings'''), ('''.''', '''/'''), ('''LayerNorm/weight''', '''LayerNorm/gamma'''), ('''LayerNorm/bias''', '''LayerNorm/beta'''), ('''weight''', '''kernel'''), ) if not os.path.isdir(__A ): os.makedirs(__A ) UpperCAmelCase_ = model.state_dict() def to_tf_var_name(__A : str ): for patt, repl in iter(__A ): UpperCAmelCase_ = name.replace(__A , __A ) return F"""bert/{name}""" def create_tf_var(__A : np.ndarray , __A : str , __A : tf.Session ): UpperCAmelCase_ = tf.dtypes.as_dtype(tensor.dtype ) UpperCAmelCase_ = tf.get_variable(dtype=__A , shape=tensor.shape , name=__A , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__A ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: UpperCAmelCase_ = to_tf_var_name(__A ) UpperCAmelCase_ = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): UpperCAmelCase_ = torch_tensor.T UpperCAmelCase_ = create_tf_var(tensor=__A , name=__A , session=__A ) tf.keras.backend.set_value(__A , __A ) UpperCAmelCase_ = session.run(__A ) print(F"""Successfully created {tf_name}: {np.allclose(__A , __A )}""" ) UpperCAmelCase_ = tf.train.Saver(tf.trainable_variables() ) saver.save(__A , os.path.join(__A , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) ) def A (__A : Any=None ) -> str: """simple docstring""" UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--model_name''' , type=__A , required=__A , help='''model name e.g. bert-base-uncased''' ) parser.add_argument( '''--cache_dir''' , type=__A , default=__A , required=__A , help='''Directory containing pytorch model''' ) parser.add_argument('''--pytorch_model_path''' , type=__A , required=__A , help='''/path/to/<pytorch-model-name>.bin''' ) parser.add_argument('''--tf_cache_dir''' , type=__A , required=__A , help='''Directory in which to save tensorflow model''' ) UpperCAmelCase_ = parser.parse_args(__A ) UpperCAmelCase_ = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__A , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
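# Example invocation of the converter above, using only the argparse flags it
# defines (file and directory paths are hypothetical):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoints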
7
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin snake_case_ : int = False @skip_mps class __snake_case ( a , a , a , unittest.TestCase ): UpperCAmelCase__ : Any = StableDiffusionAttendAndExcitePipeline UpperCAmelCase__ : int = False UpperCAmelCase__ : Tuple = TEXT_TO_IMAGE_PARAMS UpperCAmelCase__ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} ) UpperCAmelCase__ : str = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCAmelCase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def lowerCamelCase ( cls : Optional[int]): """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(_snake_case) @classmethod def lowerCamelCase ( cls : Optional[Any]): """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(_snake_case) def lowerCamelCase ( self : List[str]): """simple docstring""" torch.manual_seed(0) UpperCAmelCase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_snake_case , ) UpperCAmelCase_ = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , ) torch.manual_seed(0) UpperCAmelCase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) UpperCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , ) UpperCAmelCase_ = CLIPTextModel(_snake_case) UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''') UpperCAmelCase_ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowerCamelCase ( self : Dict , _snake_case : Optional[int] , _snake_case : List[Any]=0): """simple docstring""" if str(_snake_case).startswith('''mps'''): UpperCAmelCase_ = torch.manual_seed(_snake_case) else: UpperCAmelCase_ = torch.Generator(device=_snake_case).manual_seed(_snake_case) UpperCAmelCase_ = UpperCAmelCase_ = { '''prompt''': '''a cat and a frog''', '''token_indices''': [2, 5], '''generator''': generator, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''max_iter_to_alter''': 2, '''thresholds''': {0: 0.7}, } return inputs def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = '''cpu''' 
UpperCAmelCase_ = self.get_dummy_components() UpperCAmelCase_ = self.pipeline_class(**_snake_case) pipe.to(_snake_case) pipe.set_progress_bar_config(disable=_snake_case) UpperCAmelCase_ = self.get_dummy_inputs(_snake_case) UpperCAmelCase_ = pipe(**_snake_case).images UpperCAmelCase_ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3)) UpperCAmelCase_ = np.array( [0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6]) UpperCAmelCase_ = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(_snake_case , 1e-3) def lowerCamelCase ( self : str): """simple docstring""" super().test_cpu_offload_forward_pass(expected_max_diff=5e-4) def lowerCamelCase ( self : Dict): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2]) def lowerCamelCase ( self : Optional[int]): """simple docstring""" self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3) def lowerCamelCase ( self : int): """simple docstring""" super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4) def lowerCamelCase ( self : List[Any]): """simple docstring""" super().test_save_load_local(expected_max_difference=5e-4) def lowerCamelCase ( self : Tuple): """simple docstring""" super().test_save_load_optional_components(expected_max_difference=4e-4) @require_torch_gpu @slow class __snake_case ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls : Any): """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(_snake_case) @classmethod def lowerCamelCase ( cls : Tuple): """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(_snake_case) def lowerCamelCase ( self : str): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = torch.manual_seed(51) UpperCAmelCase_ = StableDiffusionAttendAndExcitePipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , safety_checker=_snake_case , torch_dtype=torch.floataa) pipe.to('''cuda''') UpperCAmelCase_ = '''a painting of an elephant with glasses''' UpperCAmelCase_ = [5, 7] UpperCAmelCase_ = pipe( prompt=_snake_case , token_indices=_snake_case , guidance_scale=7.5 , generator=_snake_case , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0] UpperCAmelCase_ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''') assert np.abs((expected_image - image).max()) < 5e-1
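# Note on `token_indices` in the tests above: they select the prompt tokens whose
# cross-attention maps Attend-and-Excite strengthens during denoising, indexed
# into the tokenized prompt with BOS at position 0. For "a cat and a frog",
# indices [2, 5] pick "cat" and "frog"; for "a painting of an elephant with
# glasses", [5, 7] pick "elephant" and "glasses".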
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __snake_case ( unittest.TestCase ): def __init__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict=3 , _snake_case : Dict=32 , _snake_case : List[str]=3 , _snake_case : Union[str, Any]=10 , _snake_case : Tuple=[10, 20, 30, 40] , _snake_case : Dict=[1, 1, 2, 1] , _snake_case : List[Any]=True , _snake_case : Dict=True , _snake_case : Union[str, Any]="relu" , _snake_case : Tuple=3 , _snake_case : Union[str, Any]=None , ): """simple docstring""" UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = image_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = embeddings_size UpperCAmelCase_ = hidden_sizes UpperCAmelCase_ = depths UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = hidden_act UpperCAmelCase_ = num_labels UpperCAmelCase_ = scope UpperCAmelCase_ = len(_snake_case) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) UpperCAmelCase_ = self.get_config() return config, pixel_values def lowerCamelCase ( self : List[Any]): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any] , _snake_case : Optional[int]): """simple docstring""" UpperCAmelCase_ = FlaxRegNetModel(config=_snake_case) UpperCAmelCase_ = model(_snake_case) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : Optional[int]): """simple docstring""" UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = FlaxRegNetForImageClassification(config=_snake_case) UpperCAmelCase_ = model(_snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs UpperCAmelCase_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () UpperCAmelCase__ : Tuple = False UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : int = False def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = FlaxRegNetModelTester(self) UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case) def lowerCamelCase ( self : List[Any]): """simple docstring""" 
self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self : List[str]): """simple docstring""" return def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case) @unittest.skip(reason='''RegNet does not use inputs_embeds''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" pass @unittest.skip(reason='''RegNet does not support input and output embeddings''') def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" pass def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(_snake_case) UpperCAmelCase_ = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _snake_case) def lowerCamelCase ( self : Optional[int]): """simple docstring""" def check_hidden_states_output(_snake_case : List[str] , _snake_case : Dict , _snake_case : List[str]): UpperCAmelCase_ = model_class(_snake_case) UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case)) UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ = self.model_tester.num_stages self.assertEqual(len(_snake_case) , expected_num_stages + 1) UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): UpperCAmelCase_ = self._prepare_for_class(_snake_case , _snake_case) UpperCAmelCase_ = model_class(_snake_case) @jax.jit def model_jitted(_snake_case : str , **_snake_case : Union[str, Any]): return model(pixel_values=_snake_case , **_snake_case) with self.subTest('''JIT Enabled'''): UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple() with self.subTest('''JIT Disabled'''): with jax.disable_jit(): UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple() self.assertEqual(len(_snake_case) , len(_snake_case)) for jitted_output, output in zip(_snake_case , _snake_case): self.assertEqual(jitted_output.shape , output.shape) def A () -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ = 
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_flax class __snake_case ( unittest.TestCase ): @cached_property def lowerCamelCase ( self : Dict): """simple docstring""" return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''') if is_vision_available() else None @slow def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''') UpperCAmelCase_ = self.default_image_processor UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = image_processor(images=_snake_case , return_tensors='''np''') UpperCAmelCase_ = model(**_snake_case) # verify the logits UpperCAmelCase_ = (1, 1000) self.assertEqual(outputs.logits.shape , _snake_case) UpperCAmelCase_ = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6]) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4))
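# A minimal inference sketch for the Flax RegNet classifier tested above. It
# assumes the `facebook/regnet-y-040` checkpoint used by the slow test and an
# image file on disk (the path below is illustrative).
if __name__ == "__main__":
    import jax.numpy as jnp
    from PIL import Image

    from transformers import AutoImageProcessor
    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification

    model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    inputs = processor(images=Image.open("cats.png"), return_tensors="np")
    logits = model(**inputs).logits
    print(model.config.id2label[int(jnp.argmax(logits, axis=-1)[0])])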
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """simple docstring"""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """simple docstring"""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[largest], array[index] = array[index], array[largest]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    """simple docstring"""
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """simple docstring"""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    """simple docstring"""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """simple docstring"""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """simple docstring"""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
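# Illustrative self-checks for the sorting helpers above; the doctest harness
# in __main__ has no doctests to run, so these asserts document the expected
# behaviour on a small sample list.
def _demo() -> None:
    data = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
    assert insertion_sort(list(data)) == sorted(data)
    assert heap_sort(list(data)) == sorted(data)
    assert sort(list(data)) == sorted(data)


if __name__ == "__main__":
    _demo()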
import comet # From: unbabel-comet import torch import datasets snake_case_ : Tuple = datasets.logging.get_logger(__name__) snake_case_ : str = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n" snake_case_ : Tuple = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n" snake_case_ : Optional[int] = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. 
Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): def lowerCamelCase ( self : Any): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''sources''': datasets.Value('''string''' , id='''sequence'''), '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Value('''string''' , id='''sequence'''), }) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[ '''https://github.com/Unbabel/COMET''', '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''', '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''', ] , ) def lowerCamelCase ( self : List[Any] , _snake_case : Optional[int]): """simple docstring""" if self.config_name == "default": UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''')) else: UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name)) def lowerCamelCase ( self : List[Any] , _snake_case : str , _snake_case : List[str] , _snake_case : Tuple , _snake_case : int=None , _snake_case : Optional[Any]=False): """simple docstring""" if gpus is None: UpperCAmelCase_ = 1 if torch.cuda.is_available() else 0 UpperCAmelCase_ = {'''src''': sources, '''mt''': predictions, '''ref''': references} UpperCAmelCase_ = [dict(zip(_snake_case , _snake_case)) for t in zip(*data.values())] UpperCAmelCase_ , UpperCAmelCase_ = self.scorer.predict(_snake_case , gpus=_snake_case , progress_bar=_snake_case) return {"mean_score": mean_score, "scores": scores}
7
0
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
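# Worked example of the scoring rule above (illustrative): "SKY" scores
# 19 + 11 + 25 = 55, and 55 is the 10th triangular number, so "SKY" counts
# as a triangle word.
assert sum(ord(char) - 64 for char in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS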
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __snake_case ( a ): UpperCAmelCase__ : Optional[int] = (DPMSolverSinglestepScheduler,) UpperCAmelCase__ : str = (('''num_inference_steps''', 2_5),) def lowerCamelCase ( self : Dict , **_snake_case : Dict): """simple docstring""" UpperCAmelCase_ = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf'''), '''variance_type''': None, } config.update(**_snake_case) return config def lowerCamelCase ( self : Dict , _snake_case : int=0 , **_snake_case : List[Any]): """simple docstring""" UpperCAmelCase_ = dict(self.forward_default_kwargs) UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case) UpperCAmelCase_ = self.dummy_sample UpperCAmelCase_ = 0.1 * sample UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ = self.get_scheduler_config(**_snake_case) UpperCAmelCase_ = scheduler_class(**_snake_case) scheduler.set_timesteps(_snake_case) # copy over dummy past residuals UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_snake_case) UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case) new_scheduler.set_timesteps(_snake_case) # copy over dummy past residuals UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase_ , UpperCAmelCase_ = sample, sample for t in range(_snake_case , time_step + scheduler.config.solver_order + 1): UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample UpperCAmelCase_ = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase ( self : Tuple): """simple docstring""" pass def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any]=0 , **_snake_case : int): """simple docstring""" UpperCAmelCase_ = dict(self.forward_default_kwargs) UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case) UpperCAmelCase_ = self.dummy_sample UpperCAmelCase_ = 0.1 * sample UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ = self.get_scheduler_config() UpperCAmelCase_ = scheduler_class(**_snake_case) scheduler.set_timesteps(_snake_case) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_snake_case) UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case) # copy over dummy past residuals new_scheduler.set_timesteps(_snake_case) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample UpperCAmelCase_ = 
new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase ( self : Dict , _snake_case : int=None , **_snake_case : Optional[Any]): """simple docstring""" if scheduler is None: UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(**_snake_case) UpperCAmelCase_ = scheduler_class(**_snake_case) UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(**_snake_case) UpperCAmelCase_ = scheduler_class(**_snake_case) UpperCAmelCase_ = 10 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter scheduler.set_timesteps(_snake_case) for i, t in enumerate(scheduler.timesteps): UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample return sample def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) UpperCAmelCase_ = 50 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter scheduler.set_timesteps(_snake_case) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:]): UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_5_7_4) < 1e-3 def lowerCamelCase ( self : int): """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=_snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) UpperCAmelCase_ = self.full_loop(scheduler=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3 UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config) UpperCAmelCase_ = self.full_loop(scheduler=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3 def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" self.check_over_configs(thresholding=_snake_case) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , algorithm_type='''dpmsolver++''' , solver_order=_snake_case , solver_type=_snake_case , ) def lowerCamelCase ( self : Dict): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_snake_case) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , ) UpperCAmelCase_ = self.full_loop( solver_order=_snake_case , 
solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , ) assert not torch.isnan(_snake_case).any(), "Samples have nan numbers" def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" self.check_over_configs(lower_order_final=_snake_case) self.check_over_configs(lower_order_final=_snake_case) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" self.check_over_configs(lambda_min_clipped=-float('''inf''')) self.check_over_configs(lambda_min_clipped=-5.1) def lowerCamelCase ( self : int): """simple docstring""" self.check_over_configs(variance_type=_snake_case) self.check_over_configs(variance_type='''learned_range''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=_snake_case , time_step=0) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = self.full_loop() UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3 def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_2_4_8) < 1e-3 def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''') UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.1_4_5_3) < 1e-3 def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.0_6_4_9) < 1e-3 def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(thresholding=_snake_case , dynamic_thresholding_ratio=0) UpperCAmelCase_ = scheduler_class(**_snake_case) UpperCAmelCase_ = 10 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter.half() scheduler.set_timesteps(_snake_case) for i, t in enumerate(scheduler.timesteps): UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample assert sample.dtype == torch.floataa
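# A hedged sketch of driving the scheduler outside the test harness. The
# config values mirror `get_scheduler_config` above; the zero tensor stands in
# for a real UNet epsilon prediction.
if __name__ == "__main__":
    import torch

    from diffusers import DPMSolverSinglestepScheduler

    scheduler = DPMSolverSinglestepScheduler(
        num_train_timesteps=1000, beta_schedule="scaled_linear", solver_order=2
    )
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample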
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 snake_case_ : List[str] = { "return_dict": False, "output_hidden_states": True, "output_attentions": True, "torchscript": True, "torch_dtype": "float16", "use_bfloat16": True, "tf_legacy_loss": True, "pruned_heads": {"a": 1}, "tie_word_embeddings": False, "is_decoder": True, "cross_attention_hidden_size": 128, "add_cross_attention": True, "tie_encoder_decoder": True, "max_length": 50, "min_length": 3, "do_sample": True, "early_stopping": True, "num_beams": 3, "num_beam_groups": 3, "diversity_penalty": 0.5, "temperature": 2.0, "top_k": 10, "top_p": 0.7, "typical_p": 0.2, "repetition_penalty": 0.8, "length_penalty": 0.8, "no_repeat_ngram_size": 5, "encoder_no_repeat_ngram_size": 5, "bad_words_ids": [1, 2, 3], "num_return_sequences": 3, "chunk_size_feed_forward": 5, "output_scores": True, "return_dict_in_generate": True, "forced_bos_token_id": 2, "forced_eos_token_id": 3, "remove_invalid_values": True, "architectures": ["BertModel"], "finetuning_task": "translation", "id2label": {0: "label"}, "label2id": {"label": "0"}, "tokenizer_class": "BertTokenizerFast", "prefix": "prefix", "bos_token_id": 6, "pad_token_id": 7, "eos_token_id": 8, "sep_token_id": 9, "decoder_start_token_id": 10, "exponential_decay_length_penalty": (5, 1.01), "suppress_tokens": [0, 1], "begin_suppress_tokens": 2, "task_specific_params": {"translation": "some_params"}, "problem_type": "regression", } @is_staging_test class __snake_case ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls : Optional[Any]): """simple docstring""" UpperCAmelCase_ = TOKEN HfFolder.save_token(_snake_case) @classmethod def lowerCamelCase ( cls : List[str]): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-config''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-config''') except HTTPError: pass def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37) config.push_to_hub('''test-config''' , use_auth_token=self._token) UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) # Reset repo delete_repo(token=self._token , repo_id='''test-config''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_snake_case , repo_id='''test-config''' , push_to_hub=_snake_case , use_auth_token=self._token) UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = BertConfig( vocab_size=99 , 
hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37) config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token) UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-config-org''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( _snake_case , repo_id='''valid_org/test-config-org''' , push_to_hub=_snake_case , use_auth_token=self._token) UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" CustomConfig.register_for_auto_class() UpperCAmelCase_ = CustomConfig(attribute=42) config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''}) UpperCAmelCase_ = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=_snake_case) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''') self.assertEqual(new_config.attribute , 42) class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated UpperCAmelCase_ = c.n_embd + 1 # int UpperCAmelCase_ = c.resid_pdrop + 1.0 # float UpperCAmelCase_ = not c.scale_attn_weights # bool UpperCAmelCase_ = c.summary_type + '''foo''' # str c.update_from_string( F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""") self.assertEqual(_snake_case , c.n_embd , '''mismatch for key: n_embd''') self.assertEqual(_snake_case , c.resid_pdrop , '''mismatch for key: resid_pdrop''') self.assertEqual(_snake_case , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''') self.assertEqual(_snake_case , c.summary_type , '''mismatch for key: summary_type''') def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = PretrainedConfig() UpperCAmelCase_ = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( _snake_case , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version''']) UpperCAmelCase_ = [key for key, value in config_common_kwargs.items() if value == getattr(_snake_case , _snake_case)] if len(_snake_case) > 0: raise ValueError( '''The following keys are set with the default values in''' ''' `test_configuration_common.config_common_kwargs` pick another value for them:''' F""" {", ".join(_snake_case)}.""") def lowerCamelCase ( self : str): """simple docstring""" with self.assertRaises(_snake_case): # config is in subfolder, the following should not work without specifying the subfolder UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''') UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''') self.assertIsNotNone(_snake_case) def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = mock.Mock() UpperCAmelCase_ = 500 UpperCAmelCase_ = {} UpperCAmelCase_ = HTTPError UpperCAmelCase_ = {} # Download this model to make sure it's in the cache. UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''') # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_snake_case) as mock_head: UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''') # This check we did call the fake head request mock_head.assert_called() def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = BertConfig.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''') def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = AutoConfig.from_pretrained('''bert-base-cased''') UpperCAmelCase_ = ['''config.4.0.0.json'''] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(_snake_case) UpperCAmelCase_ = 2 json.dump(configuration.to_dict() , open(os.path.join(_snake_case , '''config.4.0.0.json''') , '''w''')) # This should pick the new configuration file as the version of Transformers is > 4.0.0 UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) self.assertEqual(new_configuration.hidden_size , 2) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 UpperCAmelCase_ = ['''config.42.0.0.json'''] UpperCAmelCase_ = 768 configuration.save_pretrained(_snake_case) shutil.move(os.path.join(_snake_case , '''config.4.0.0.json''') , os.path.join(_snake_case , '''config.42.0.0.json''')) UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) self.assertEqual(new_configuration.hidden_size , 768) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''hf-internal-testing/test-two-configs''' import transformers as new_transformers UpperCAmelCase_ = '''v4.0.0''' UpperCAmelCase_ , UpperCAmelCase_ = new_transformers.models.auto.AutoConfig.from_pretrained( _snake_case , return_unused_kwargs=_snake_case) self.assertEqual(new_configuration.hidden_size , 2) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(_snake_case , {}) # Testing an older version by monkey-patching the version in the module it's used. 
import transformers as old_transformers UpperCAmelCase_ = '''v3.0.0''' UpperCAmelCase_ = old_transformers.models.auto.AutoConfig.from_pretrained(_snake_case) self.assertEqual(old_configuration.hidden_size , 768)
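# A standalone sketch of `update_from_string`, the mechanism exercised in the
# first test of the class above (the obfuscated import `GPTaConfig` corresponds
# to GPT2Config).
if __name__ == "__main__":
    from transformers import GPT2Config

    c = GPT2Config()
    c.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index")
    assert c.n_embd == 10
    assert c.resid_pdrop == 0.2
    assert c.scale_attn_weights is False
    assert c.summary_type == "cls_index"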
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
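# With the lazy module in place, symbols resolve on first attribute access
# (illustrative; each import requires the corresponding backend extras):
#
#     from transformers import DeiTConfig          # triggers configuration_deit
#     from transformers import DeiTImageProcessor  # requires vision extras
#     from transformers import DeiTModel           # requires torch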
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case_ : List[Any] = logging.get_logger(__name__) snake_case_ : Optional[int] = { "vocab_file": "vocab.json", "tokenizer_config_file": "tokenizer_config.json", "merges_file": "merges.txt", } snake_case_ : List[str] = { "vocab_file": { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json" ), }, "tokenizer_config_file": { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json" ), }, "merges_file": { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt" ), }, } snake_case_ : Optional[Any] = "</w>" snake_case_ : Optional[int] = "@@ " def A (__A : Any ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ = set() UpperCAmelCase_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase_ = char return pairs # Speech2Text2 has no max input length snake_case_ : str = {"facebook/s2t-wav2vec2-large-en-de": 1024} class __snake_case ( a ): UpperCAmelCase__ : int = VOCAB_FILES_NAMES UpperCAmelCase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : Optional[Any] = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[Any] , _snake_case : Optional[int] , _snake_case : Tuple="<s>" , _snake_case : int="<pad>" , _snake_case : Tuple="</s>" , _snake_case : int="<unk>" , _snake_case : Optional[Any]=False , _snake_case : Optional[int]=None , **_snake_case : Optional[int] , ): """simple docstring""" super().__init__( unk_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , pad_token=_snake_case , do_lower_case=_snake_case , **_snake_case , ) UpperCAmelCase_ = do_lower_case with open(_snake_case , encoding='''utf-8''') as vocab_handle: UpperCAmelCase_ = json.load(_snake_case) UpperCAmelCase_ = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(F"""No merges files provided. 
{self.__class__.__name__} can only be used for decoding.""") UpperCAmelCase_ = None UpperCAmelCase_ = None else: with open(_snake_case , encoding='''utf-8''') as merges_handle: UpperCAmelCase_ = merges_handle.read().split('''\n''')[:-1] UpperCAmelCase_ = [tuple(merge.split()[:2]) for merge in merges] UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case)))) UpperCAmelCase_ = {} @property def lowerCamelCase ( self : List[Any]): """simple docstring""" return len(self.decoder) def lowerCamelCase ( self : Any): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder) def lowerCamelCase ( self : List[Any] , _snake_case : List[Any]): """simple docstring""" UpperCAmelCase_ = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] UpperCAmelCase_ = get_pairs(_snake_case) if not pairs: return token while True: UpperCAmelCase_ = min(_snake_case , key=lambda _snake_case: self.bpe_ranks.get(_snake_case , float('''inf'''))) if bigram not in self.bpe_ranks: break UpperCAmelCase_ , UpperCAmelCase_ = bigram UpperCAmelCase_ = [] UpperCAmelCase_ = 0 while i < len(_snake_case): try: UpperCAmelCase_ = word.index(_snake_case , _snake_case) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) UpperCAmelCase_ = j if word[i] == first and i < len(_snake_case) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 UpperCAmelCase_ = tuple(_snake_case) UpperCAmelCase_ = new_word if len(_snake_case) == 1: break else: UpperCAmelCase_ = get_pairs(_snake_case) UpperCAmelCase_ = ''' '''.join(_snake_case) if word == "\n " + BPE_TOKEN_MERGES: UpperCAmelCase_ = '''\n''' + BPE_TOKEN_MERGES if word.endswith(_snake_case): UpperCAmelCase_ = word.replace(_snake_case , '''''') UpperCAmelCase_ = word.replace(''' ''' , _snake_case) UpperCAmelCase_ = word return word def lowerCamelCase ( self : int , _snake_case : Union[str, Any]): """simple docstring""" if self.bpe_ranks is None: raise ValueError( '''This tokenizer was instantiated without a `merges.txt` file, so''' ''' that it can only be used for decoding, not for encoding.''' '''Make sure to provide `merges.txt` file at instantiation to enable ''' '''encoding.''') if self.do_lower_case: UpperCAmelCase_ = text.lower() UpperCAmelCase_ = text.split() UpperCAmelCase_ = [] for token in text: if token: split_tokens.extend(list(self.bpe(_snake_case).split(''' '''))) return split_tokens def lowerCamelCase ( self : Optional[Any] , _snake_case : str): """simple docstring""" return self.encoder.get(_snake_case , self.encoder.get(self.unk_token)) def lowerCamelCase ( self : List[Any] , _snake_case : int): """simple docstring""" UpperCAmelCase_ = self.decoder.get(_snake_case , self.unk_token) return result def lowerCamelCase ( self : Dict , _snake_case : List[str]): """simple docstring""" UpperCAmelCase_ = ''' '''.join(_snake_case) # make sure @@ tokens are concatenated UpperCAmelCase_ = ''''''.join(string.split(_snake_case)) return string def lowerCamelCase ( self : Optional[int] , _snake_case : str , _snake_case : Optional[str] = None): """simple docstring""" if not os.path.isdir(_snake_case): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""") return UpperCAmelCase_ = os.path.join( _snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) UpperCAmelCase_ = os.path.join( _snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + 
VOCAB_FILES_NAMES['''merges_file''']) with open(_snake_case , '''w''' , encoding='''utf-8''') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_snake_case , ensure_ascii=_snake_case) + '''\n''') UpperCAmelCase_ = 0 if self.bpe_ranks is None: return (vocab_file,) with open(_snake_case , '''w''' , encoding='''utf-8''') as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _snake_case: kv[1]): if index != token_index: logger.warning( F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''') UpperCAmelCase_ = token_index writer.write(''' '''.join(_snake_case) + '''\n''') index += 1 return (vocab_file, merges_file)
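# A usage sketch for the tokenizer above, assuming the upstream class name
# `Speech2Text2Tokenizer` that this module corresponds to. Per the class logic,
# encoding only works when the checkpoint ships a `merges.txt` (the pretrained
# map above lists one for this checkpoint); decoding always works.
if __name__ == "__main__":
    from transformers import Speech2Text2Tokenizer

    tok = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
    ids = tok("hello world").input_ids
    print(tok.decode(ids))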
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets snake_case_ : Dict = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n" snake_case_ : List[str] = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n" snake_case_ : List[Any] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... 
[\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... 
case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): def lowerCamelCase ( self : Tuple): """simple docstring""" if version.parse(scb.__version__) < version.parse('''1.4.12'''): raise ImportWarning( '''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n''' '''You can install it with `pip install "sacrebleu>=1.4.12"`.''') return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''') , id='''references'''), }) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[ '''https://github.com/jhclark/tercom''', ] , ) def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , ): """simple docstring""" UpperCAmelCase_ = len(references[0]) if any(len(_snake_case) != references_per_prediction for refs in references): raise ValueError('''Sacrebleu requires the same number of references for each prediction''') UpperCAmelCase_ = [[refs[i] for refs in references] for i in range(_snake_case)] UpperCAmelCase_ = TER( normalized=_snake_case , no_punct=_snake_case , asian_support=_snake_case , case_sensitive=_snake_case , ) UpperCAmelCase_ = sb_ter.corpus_score(_snake_case , _snake_case) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() snake_case_ : Any = logging.get_logger(__name__) def A (__A : Optional[int] , __A : List[Any]=False ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" UpperCAmelCase_ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def A (__A : Tuple , __A : Tuple , __A : List[Any]=False ) -> Any: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: UpperCAmelCase_ = '''''' else: UpperCAmelCase_ = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase_ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) UpperCAmelCase_ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase_ = in_proj_bias[: config.hidden_size] UpperCAmelCase_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase_ = in_proj_bias[ config.hidden_size : 
config.hidden_size * 2 ] UpperCAmelCase_ = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase_ = in_proj_bias[-config.hidden_size :] def A (__A : List[Any] ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__A , __A ) def A (__A : str , __A : Union[str, Any] , __A : int ) -> Any: """simple docstring""" UpperCAmelCase_ = dct.pop(__A ) UpperCAmelCase_ = val def A () -> Tuple: """simple docstring""" UpperCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase_ = Image.open(requests.get(__A , stream=__A ).raw ) return im @torch.no_grad() def A (__A : Tuple , __A : Dict ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ = ViTConfig() UpperCAmelCase_ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": UpperCAmelCase_ = True UpperCAmelCase_ = int(vit_name[-12:-10] ) UpperCAmelCase_ = int(vit_name[-9:-6] ) else: UpperCAmelCase_ = 1000 UpperCAmelCase_ = '''huggingface/label-files''' UpperCAmelCase_ = '''imagenet-1k-id2label.json''' UpperCAmelCase_ = json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase_ = {int(__A ): v for k, v in idalabel.items()} UpperCAmelCase_ = idalabel UpperCAmelCase_ = {v: k for k, v in idalabel.items()} UpperCAmelCase_ = int(vit_name[-6:-4] ) UpperCAmelCase_ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith('''tiny''' ): UpperCAmelCase_ = 192 UpperCAmelCase_ = 768 UpperCAmelCase_ = 12 UpperCAmelCase_ = 3 elif vit_name[9:].startswith('''small''' ): UpperCAmelCase_ = 384 UpperCAmelCase_ = 1536 UpperCAmelCase_ = 12 UpperCAmelCase_ = 6 else: pass else: if vit_name[4:].startswith('''small''' ): UpperCAmelCase_ = 768 UpperCAmelCase_ = 2304 UpperCAmelCase_ = 8 UpperCAmelCase_ = 8 elif vit_name[4:].startswith('''base''' ): pass elif vit_name[4:].startswith('''large''' ): UpperCAmelCase_ = 1024 UpperCAmelCase_ = 4096 UpperCAmelCase_ = 24 UpperCAmelCase_ = 16 elif vit_name[4:].startswith('''huge''' ): UpperCAmelCase_ = 1280 UpperCAmelCase_ = 5120 UpperCAmelCase_ = 32 UpperCAmelCase_ = 16 # load original model from timm UpperCAmelCase_ = timm.create_model(__A , pretrained=__A ) timm_model.eval() # load state_dict of original model, remove and rename some keys UpperCAmelCase_ = timm_model.state_dict() if base_model: remove_classification_head_(__A ) UpperCAmelCase_ = create_rename_keys(__A , __A ) for src, dest in rename_keys: rename_key(__A , __A , __A ) read_in_q_k_v(__A , __A , __A ) # load HuggingFace model if vit_name[-5:] == "in21k": UpperCAmelCase_ = ViTModel(__A ).eval() else: UpperCAmelCase_ = ViTForImageClassification(__A ).eval() model.load_state_dict(__A ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: UpperCAmelCase_ = DeiTImageProcessor(size=config.image_size ) else: UpperCAmelCase_ = ViTImageProcessor(size=config.image_size ) UpperCAmelCase_ = image_processor(images=prepare_img() , return_tensors='''pt''' ) UpperCAmelCase_ = encoding['''pixel_values'''] UpperCAmelCase_ = model(__A ) if base_model: UpperCAmelCase_ = timm_model.forward_features(__A ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__A , outputs.pooler_output , atol=1E-3 ) else: UpperCAmelCase_ = timm_model(__A ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__A , outputs.logits , atol=1E-3 ) 
Path(__A ).mkdir(exist_ok=__A ) print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__A ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__A ) if __name__ == "__main__": snake_case_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--vit_name", default="vit_base_patch16_224", type=str, help="Name of the ViT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) snake_case_ : int = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
351
import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class __snake_case ( unittest.TestCase , a ): def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = load_tool('''text-to-speech''') self.tool.setup() def lowerCamelCase ( self : int): """simple docstring""" torch.manual_seed(0) UpperCAmelCase_ = self.tool('''hey''') UpperCAmelCase_ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , )) def lowerCamelCase ( self : Any): """simple docstring""" torch.manual_seed(0) UpperCAmelCase_ = self.tool('''hey''') UpperCAmelCase_ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
7
0
from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case_ : str = logging.get_logger(__name__) snake_case_ : List[Any] = { "google/realm-cc-news-pretrained-embedder": ( "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json" ), "google/realm-cc-news-pretrained-encoder": ( "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json" ), "google/realm-cc-news-pretrained-scorer": ( "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json" ), "google/realm-cc-news-pretrained-openqa": ( "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json" ), "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json", "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json", "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json", "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json", # See all REALM models at https://huggingface.co/models?filter=realm } class __snake_case ( a ): UpperCAmelCase__ : List[Any] = '''realm''' def __init__( self : Union[str, Any] , _snake_case : Union[str, Any]=30522 , _snake_case : int=768 , _snake_case : List[str]=128 , _snake_case : List[str]=12 , _snake_case : Tuple=12 , _snake_case : Tuple=8 , _snake_case : int=3072 , _snake_case : int="gelu_new" , _snake_case : List[str]=0.1 , _snake_case : Dict=0.1 , _snake_case : List[Any]=512 , _snake_case : str=2 , _snake_case : str=0.0_2 , _snake_case : Dict=1e-12 , _snake_case : Union[str, Any]=256 , _snake_case : str=10 , _snake_case : Optional[int]=1e-3 , _snake_case : Dict=5 , _snake_case : Union[str, Any]=320 , _snake_case : Tuple=13353718 , _snake_case : Optional[Any]=5000 , _snake_case : Any=1 , _snake_case : Dict=0 , _snake_case : Optional[int]=2 , **_snake_case : Optional[int] , ): """simple docstring""" super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case) # Common config UpperCAmelCase_ = vocab_size UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = hidden_size UpperCAmelCase_ = retriever_proj_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = num_candidates UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = initializer_range UpperCAmelCase_ = type_vocab_size UpperCAmelCase_ = layer_norm_eps # Reader config UpperCAmelCase_ = span_hidden_size UpperCAmelCase_ = max_span_width UpperCAmelCase_ = reader_layer_norm_eps UpperCAmelCase_ = reader_beam_size UpperCAmelCase_ = reader_seq_len # Retrieval config UpperCAmelCase_ = num_block_records UpperCAmelCase_ = searcher_beam_size
352
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
7
0
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class __snake_case ( a ): UpperCAmelCase__ : Union[List[PIL.Image.Image], np.ndarray] UpperCAmelCase__ : Optional[List[bool]] UpperCAmelCase__ : Optional[List[bool]] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
353
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __snake_case : @staticmethod def lowerCamelCase ( *_snake_case : List[str] , **_snake_case : str): """simple docstring""" pass @is_pipeline_test @require_torch @require_vision class __snake_case ( unittest.TestCase ): UpperCAmelCase__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def lowerCamelCase ( self : Any , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''') UpperCAmelCase_ = [ { '''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''), '''question''': '''How many cats are there?''', }, { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''question''': '''How many cats are there?''', }, ] return vqa_pipeline, examples def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , _snake_case : Optional[int]): """simple docstring""" UpperCAmelCase_ = vqa_pipeline(_snake_case , top_k=1) self.assertEqual( _snake_case , [ [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}], [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}], ] , ) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''') UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' UpperCAmelCase_ = '''How many cats are there?''' UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2) self.assertEqual( _snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}]) UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2) self.assertEqual( _snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}]) @slow @require_torch def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''') UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' UpperCAmelCase_ = '''How many cats are there?''' UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2) self.assertEqual( nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]) UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2) self.assertEqual( nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]) UpperCAmelCase_ = vqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2) self.assertEqual( nested_simplify(_snake_case , decimals=4) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , ) @require_tf 
@unittest.skip('''Visual question answering not implemented in TF''') def lowerCamelCase ( self : Tuple): """simple docstring""" pass
7
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging snake_case_ : Any = logging.get_logger(__name__) snake_case_ : List[Any] = "▁" snake_case_ : Any = {"vocab_file": "sentencepiece.bpe.model"} snake_case_ : List[Any] = { "vocab_file": { "facebook/mbart-large-50-one-to-many-mmt": ( "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model" ), } } snake_case_ : List[str] = { "facebook/mbart-large-50-one-to-many-mmt": 1024, } # fmt: off snake_case_ : int = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"] class __snake_case ( a ): UpperCAmelCase__ : int = VOCAB_FILES_NAMES UpperCAmelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : List[str] = ['''input_ids''', '''attention_mask'''] UpperCAmelCase__ : List[int] = [] UpperCAmelCase__ : List[int] = [] def __init__( self : List[Any] , _snake_case : Tuple , _snake_case : Optional[Any]=None , _snake_case : Any=None , _snake_case : Dict="</s>" , _snake_case : int="</s>" , _snake_case : Optional[Any]="<s>" , _snake_case : List[Any]="<unk>" , _snake_case : List[str]="<pad>" , _snake_case : List[str]="<mask>" , _snake_case : Optional[Dict[str, Any]] = None , **_snake_case : str , ): """simple docstring""" UpperCAmelCase_ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case) if isinstance(_snake_case , _snake_case) else mask_token UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs UpperCAmelCase_ = kwargs.get('''additional_special_tokens''' , []) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=_snake_case , tgt_lang=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , ) UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(_snake_case)) UpperCAmelCase_ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token UpperCAmelCase_ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab UpperCAmelCase_ = 1 UpperCAmelCase_ = len(self.sp_model) UpperCAmelCase_ = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_snake_case) } UpperCAmelCase_ = {v: k for k, v in self.lang_code_to_id.items()} UpperCAmelCase_ = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id) UpperCAmelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} UpperCAmelCase_ = src_lang if src_lang is not None else '''en_XX''' UpperCAmelCase_ = self.lang_code_to_id[self._src_lang] UpperCAmelCase_ = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def lowerCamelCase ( self : Optional[Any]): """simple docstring""" return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def lowerCamelCase ( self : int): """simple docstring""" return self._src_lang @src_lang.setter def lowerCamelCase ( self : Optional[Any] , _snake_case : str): """simple docstring""" UpperCAmelCase_ = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def __getstate__( self : int): """simple docstring""" UpperCAmelCase_ = self.__dict__.copy() UpperCAmelCase_ = None return state def __setstate__( self : str , _snake_case : Dict): """simple docstring""" UpperCAmelCase_ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs'''): UpperCAmelCase_ = {} UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = {self.convert_ids_to_tokens(_snake_case): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def lowerCamelCase ( self : int , _snake_case : str): """simple docstring""" return self.sp_model.encode(_snake_case , out_type=_snake_case) def lowerCamelCase ( self : Dict , _snake_case : str): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCAmelCase_ = self.sp_model.PieceToId(_snake_case) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowerCamelCase ( self : Dict , _snake_case : int): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def lowerCamelCase ( self : str , _snake_case : Any): """simple docstring""" UpperCAmelCase_ = [] UpperCAmelCase_ = '''''' UpperCAmelCase_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_snake_case) + token UpperCAmelCase_ = True UpperCAmelCase_ = [] else: current_sub_tokens.append(_snake_case) UpperCAmelCase_ = False out_string += self.sp_model.decode(_snake_case) return out_string.strip() def lowerCamelCase ( self : Tuple , _snake_case : str , _snake_case : Optional[str] = None): """simple docstring""" if not os.path.isdir(_snake_case): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""") return UpperCAmelCase_ = os.path.join( 
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(_snake_case) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , _snake_case) elif not os.path.isfile(self.vocab_file): with open(_snake_case , '''wb''') as fi: UpperCAmelCase_ = self.sp_model.serialized_model_proto() fi.write(_snake_case) return (out_vocab_file,) def lowerCamelCase ( self : Optional[Any] , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case) UpperCAmelCase_ = [1] * len(self.prefix_tokens) UpperCAmelCase_ = [1] * len(self.suffix_tokens) if token_ids_a is None: return prefix_ones + ([0] * len(_snake_case)) + suffix_ones return prefix_ones + ([0] * len(_snake_case)) + ([0] * len(_snake_case)) + suffix_ones def lowerCamelCase ( self : int , _snake_case : List[int] , _snake_case : Optional[List[int]] = None): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowerCamelCase ( self : List[str] , _snake_case : List[str] , _snake_case : str , _snake_case : Optional[str] , _snake_case : Optional[str] , **_snake_case : Any): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''') UpperCAmelCase_ = src_lang UpperCAmelCase_ = self(_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , **_snake_case) UpperCAmelCase_ = self.convert_tokens_to_ids(_snake_case) UpperCAmelCase_ = tgt_lang_id return inputs def lowerCamelCase ( self : str , _snake_case : List[str] , _snake_case : str = "en_XX" , _snake_case : Optional[List[str]] = None , _snake_case : str = "ro_RO" , **_snake_case : Optional[int] , ): """simple docstring""" UpperCAmelCase_ = src_lang UpperCAmelCase_ = tgt_lang return super().prepare_seqaseq_batch(_snake_case , _snake_case , **_snake_case) def lowerCamelCase ( self : str): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang) def lowerCamelCase ( self : Tuple , _snake_case : str): """simple docstring""" UpperCAmelCase_ = self.lang_code_to_id[src_lang] UpperCAmelCase_ = [self.cur_lang_code_id] UpperCAmelCase_ = [self.eos_token_id] def lowerCamelCase ( self : Any , _snake_case : str): """simple docstring""" UpperCAmelCase_ = self.lang_code_to_id[tgt_lang] UpperCAmelCase_ = [self.cur_lang_code_id] UpperCAmelCase_ = [self.eos_token_id]
354
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit (Brian Kernighan)."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by testing the parity of each bit with the modulo operator."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Compare the two implementations on a few sample inputs."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        # Note: the timed statements below always use the fixed input 25,
        # regardless of the value of `number` printed above.
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
7
0
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version snake_case_ : Tuple = version.parse(importlib_metadata.version('''nltk''')) if NLTK_VERSION >= version.Version('''3.6.4'''): from nltk import word_tokenize snake_case_ : Optional[Any] = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" snake_case_ : Optional[Any] = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" snake_case_ : Dict = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): def lowerCamelCase ( self : List[str]): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Value('''string''' , id='''sequence'''), }) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[ '''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''', '''https://en.wikipedia.org/wiki/METEOR''', ] , ) def lowerCamelCase ( self : Union[str, Any] , _snake_case : Union[str, Any]): """simple docstring""" import nltk nltk.download('''wordnet''') if NLTK_VERSION >= version.Version('''3.6.5'''): nltk.download('''punkt''') if NLTK_VERSION >= version.Version('''3.6.6'''): nltk.download('''omw-1.4''') def lowerCamelCase ( self : List[str] , _snake_case : Optional[int] , _snake_case : str , _snake_case : List[str]=0.9 , _snake_case : Optional[int]=3 , _snake_case : Optional[int]=0.5): """simple docstring""" if NLTK_VERSION >= version.Version('''3.6.5'''): UpperCAmelCase_ = [ meteor_score.single_meteor_score( word_tokenize(_snake_case) , word_tokenize(_snake_case) , alpha=_snake_case , beta=_snake_case , gamma=_snake_case) for ref, pred in zip(_snake_case , _snake_case) ] else: UpperCAmelCase_ = [ meteor_score.single_meteor_score(_snake_case , _snake_case , alpha=_snake_case , beta=_snake_case , gamma=_snake_case) for ref, pred in zip(_snake_case , _snake_case) ] return {"meteor": np.mean(_snake_case)}
355
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = 10 def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = [1, 2, 3, 4] UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.''' UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case) self.assertEqual(_snake_case , []) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''''' UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case) self.assertEqual(_snake_case , []) self.assertEqual(_snake_case , []) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = ( '''It was the year of Our Lord one thousand seven hundred and ''' '''seventy-five\n\nSpiritual revelations were conceded to England ''' '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times''' ) UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case) UpperCAmelCase_ = [ '''It was the year of Our Lord one thousand seven hundred and seventy-five.''', '''Spiritual revelations were conceded to England at that favoured period, as at this.''', ] self.assertEqual(_snake_case , _snake_case) UpperCAmelCase_ = ['''It was the best of times.'''] self.assertEqual(_snake_case , _snake_case) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = torch.tensor([1, 2, 3, 4]) UpperCAmelCase_ = torch.tensor([1, 1, 1, 1]) np.testing.assert_array_equal(build_mask(_snake_case , 0).numpy() , expected.numpy()) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23]) UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0]) np.testing.assert_array_equal(build_mask(_snake_case , 23).numpy() , expected.numpy()) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1]) UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0]) np.testing.assert_array_equal(build_mask(_snake_case , 1).numpy() , expected.numpy()) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = 101 UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]]) UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]]) UpperCAmelCase_ = compute_token_type_ids(_snake_case , _snake_case) np.testing.assert_array_equal(_snake_case , _snake_case)
7
0
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device snake_case_ : Optional[Any] = False class __snake_case ( unittest.TestCase ): pass @nightly @require_torch_gpu class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Tuple): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa) pipe.to(_snake_case) pipe.set_progress_bar_config(disable=_snake_case) UpperCAmelCase_ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''') UpperCAmelCase_ = torch.manual_seed(0) UpperCAmelCase_ = pipe.dual_guided( prompt='''first prompt''' , image=_snake_case , text_to_image_strength=0.7_5 , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_snake_case) UpperCAmelCase_ = VersatileDiffusionPipeline.from_pretrained(_snake_case , torch_dtype=torch.floataa) pipe.to(_snake_case) pipe.set_progress_bar_config(disable=_snake_case) UpperCAmelCase_ = generator.manual_seed(0) UpperCAmelCase_ = pipe.dual_guided( prompt='''first prompt''' , image=_snake_case , text_to_image_strength=0.7_5 , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass" def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa) pipe.to(_snake_case) pipe.set_progress_bar_config(disable=_snake_case) UpperCAmelCase_ = '''cyberpunk 2077''' UpperCAmelCase_ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''') UpperCAmelCase_ = torch.manual_seed(0) UpperCAmelCase_ = pipe.dual_guided( prompt=_snake_case , image=_snake_case , text_to_image_strength=0.7_5 , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images UpperCAmelCase_ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 UpperCAmelCase_ = '''A painting of a squirrel eating a burger ''' UpperCAmelCase_ = torch.manual_seed(0) UpperCAmelCase_ = pipe.text_to_image( prompt=_snake_case , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''').images UpperCAmelCase_ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 UpperCAmelCase_ = pipe.image_variation(_snake_case , generator=_snake_case , output_type='''numpy''').images UpperCAmelCase_ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 
0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
356
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): snake_case_ : Any = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right snake_case_ : Optional[Any] = 128022 snake_case_ : Optional[int] = 128028 @require_sentencepiece class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : List[str] = MaMaaaTokenizer UpperCAmelCase__ : int = False UpperCAmelCase__ : Dict = False UpperCAmelCase__ : List[str] = True def lowerCamelCase ( self : str): """simple docstring""" super().setUp() UpperCAmelCase_ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case)))) UpperCAmelCase_ = Path(self.tmpdirname) save_json(_snake_case , save_dir / VOCAB_FILES_NAMES['''vocab_file''']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES['''spm_file''']) UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def lowerCamelCase ( self : str , **_snake_case : Union[str, Any]): """simple docstring""" return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_snake_case) def lowerCamelCase ( self : Optional[int] , _snake_case : List[str]): """simple docstring""" return ( "This is a test", "This is a test", ) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = '''</s>''' UpperCAmelCase_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = list(tokenizer.get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''</s>''') self.assertEqual(vocab_keys[1] , '''<unk>''') self.assertEqual(vocab_keys[-1] , '''<s>''') self.assertEqual(len(_snake_case) , tokenizer.vocab_size + len(tokenizer.get_added_vocab())) @unittest.skip('''Skip this test while all models are still to be uploaded.''') def lowerCamelCase ( self : Optional[int]): """simple docstring""" pass def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = tokenizer.tokenize('''This is a test''') self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual( tokenizer.convert_tokens_to_ids(_snake_case) , [2, 3, 4, 5, 6] , ) UpperCAmelCase_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6]) self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case) self.assertEqual(_snake_case , '''This is a test''') @slow def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 
207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_snake_case , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , ) @require_torch @require_sentencepiece @require_tokenizers class __snake_case ( unittest.TestCase ): UpperCAmelCase__ : Dict = '''facebook/m2m100_418M''' UpperCAmelCase__ : Dict = [ '''In my opinion, there are two levels of response from the French government.''', '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''', ] UpperCAmelCase__ : Dict = [ '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', ] # fmt: off UpperCAmelCase__ : Any = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2] @classmethod def lowerCamelCase ( cls : Optional[Any]): """simple docstring""" UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''') UpperCAmelCase_ = 1 return cls def lowerCamelCase ( self : List[Any]): 
"""simple docstring""" self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006) self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022) self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076) self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.tokenizer.get_vocab() self.assertEqual(len(_snake_case) , self.tokenizer.vocab_size) self.assertEqual(vocab['''<unk>'''] , 3) self.assertIn(self.tokenizer.get_lang_token('''en''') , _snake_case) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''en''' UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , _snake_case) def lowerCamelCase ( self : Any): """simple docstring""" self.assertIn(_snake_case , self.tokenizer.all_special_ids) # fmt: off UpperCAmelCase_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: on UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case) UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case) self.assertEqual(_snake_case , _snake_case) self.assertNotIn(self.tokenizer.eos_token , _snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(_snake_case) UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(_snake_case) self.assertDictEqual(new_tok.lang_token_to_id , _snake_case) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = '''en''' UpperCAmelCase_ = '''fr''' UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''') UpperCAmelCase_ = shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id) for k in batch: UpperCAmelCase_ = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''mr''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) UpperCAmelCase_ = '''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) @require_torch def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) UpperCAmelCase_ = '''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')]) 
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''') self.assertEqual( nested_simplify(_snake_case) , { # en_XX, A, test, EOS '''input_ids''': [[128022, 58, 4183, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 128006, } , )
7
0
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover of ``graph`` via a maximal matching."""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of (from_node, to_node) pairs of ``graph``."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
357
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING snake_case_ : List[str] = logging.get_logger(__name__) @add_end_docstrings(a ) class __snake_case ( a ): def __init__( self : Tuple , *_snake_case : List[Any] , **_snake_case : Optional[Any]): """simple docstring""" super().__init__(*_snake_case , **_snake_case) self.check_model_type(_snake_case) def lowerCamelCase ( self : List[str] , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : str=None , **_snake_case : Optional[Any]): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = {}, {} if padding is not None: UpperCAmelCase_ = padding if truncation is not None: UpperCAmelCase_ = truncation if top_k is not None: UpperCAmelCase_ = top_k return preprocess_params, {}, postprocess_params def __call__( self : List[Any] , _snake_case : Union["Image.Image", str] , _snake_case : str = None , **_snake_case : str): """simple docstring""" if isinstance(_snake_case , (Image.Image, str)) and isinstance(_snake_case , _snake_case): UpperCAmelCase_ = {'''image''': image, '''question''': question} else: UpperCAmelCase_ = image UpperCAmelCase_ = super().__call__(_snake_case , **_snake_case) return results def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : Optional[int]=False , _snake_case : int=False): """simple docstring""" UpperCAmelCase_ = load_image(inputs['''image''']) UpperCAmelCase_ = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=_snake_case , truncation=_snake_case) UpperCAmelCase_ = self.image_processor(images=_snake_case , return_tensors=self.framework) model_inputs.update(_snake_case) return model_inputs def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]): """simple docstring""" UpperCAmelCase_ = self.model(**_snake_case) return model_outputs def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : List[str]=5): """simple docstring""" if top_k > self.model.config.num_labels: UpperCAmelCase_ = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase_ = model_outputs.logits.sigmoid()[0] UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(_snake_case) else: raise ValueError(F"""Unsupported framework: {self.framework}""") UpperCAmelCase_ = scores.tolist() UpperCAmelCase_ = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_snake_case , _snake_case)]
7
0
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging snake_case_ : List[str] = logging.get_logger(__name__) snake_case_ : List[str] = {"vocab_file": "spiece.model"} snake_case_ : Optional[int] = { "vocab_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model", } } snake_case_ : List[Any] = { "xlnet-base-cased": None, "xlnet-large-cased": None, } # Segments (not really needed) snake_case_ : List[str] = 0 snake_case_ : int = 1 snake_case_ : Optional[int] = 2 snake_case_ : Optional[int] = 3 snake_case_ : List[Any] = 4 class __snake_case ( a ): UpperCAmelCase__ : Any = VOCAB_FILES_NAMES UpperCAmelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : int = '''left''' def __init__( self : Tuple , _snake_case : Optional[Any] , _snake_case : Optional[int]=False , _snake_case : int=True , _snake_case : List[Any]=False , _snake_case : int="<s>" , _snake_case : Union[str, Any]="</s>" , _snake_case : List[str]="<unk>" , _snake_case : str="<sep>" , _snake_case : Optional[Any]="<pad>" , _snake_case : Tuple="<cls>" , _snake_case : int="<mask>" , _snake_case : Union[str, Any]=["<eop>", "<eod>"] , _snake_case : Optional[Dict[str, Any]] = None , **_snake_case : int , ): """simple docstring""" UpperCAmelCase_ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case) if isinstance(_snake_case , _snake_case) else mask_token UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , additional_special_tokens=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , ) UpperCAmelCase_ = 3 UpperCAmelCase_ = do_lower_case UpperCAmelCase_ = remove_space UpperCAmelCase_ = keep_accents UpperCAmelCase_ = vocab_file UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(_snake_case) @property def lowerCamelCase ( self : Tuple): """simple docstring""" return len(self.sp_model) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = {self.convert_ids_to_tokens(_snake_case): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : Any): """simple docstring""" UpperCAmelCase_ = self.__dict__.copy() UpperCAmelCase_ = None return state def __setstate__( self : str , _snake_case : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs'''): UpperCAmelCase_ = {} UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def lowerCamelCase ( self : int , _snake_case : Any): """simple docstring""" if self.remove_space: UpperCAmelCase_ = ''' '''.join(inputs.strip().split()) else: UpperCAmelCase_ = inputs UpperCAmelCase_ = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''') if not self.keep_accents: UpperCAmelCase_ = unicodedata.normalize('''NFKD''' , _snake_case) 
UpperCAmelCase_ = ''''''.join([c for c in outputs if not unicodedata.combining(_snake_case)]) if self.do_lower_case: UpperCAmelCase_ = outputs.lower() return outputs def lowerCamelCase ( self : Any , _snake_case : str): """simple docstring""" UpperCAmelCase_ = self.preprocess_text(_snake_case) UpperCAmelCase_ = self.sp_model.encode(_snake_case , out_type=_snake_case) UpperCAmelCase_ = [] for piece in pieces: if len(_snake_case) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit(): UpperCAmelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_snake_case , '''''')) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: UpperCAmelCase_ = cur_pieces[1:] else: UpperCAmelCase_ = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(_snake_case) else: new_pieces.append(_snake_case) return new_pieces def lowerCamelCase ( self : Tuple , _snake_case : Dict): """simple docstring""" return self.sp_model.PieceToId(_snake_case) def lowerCamelCase ( self : str , _snake_case : Union[str, Any]): """simple docstring""" return self.sp_model.IdToPiece(_snake_case) def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]): """simple docstring""" UpperCAmelCase_ = ''''''.join(_snake_case).replace(_snake_case , ''' ''').strip() return out_string def lowerCamelCase ( self : Any , _snake_case : List[int] , _snake_case : bool = False , _snake_case : bool = None , _snake_case : bool = True , **_snake_case : Dict , ): """simple docstring""" UpperCAmelCase_ = kwargs.pop('''use_source_tokenizer''' , _snake_case) UpperCAmelCase_ = self.convert_ids_to_tokens(_snake_case , skip_special_tokens=_snake_case) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 UpperCAmelCase_ = [] UpperCAmelCase_ = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_snake_case)) UpperCAmelCase_ = [] sub_texts.append(_snake_case) else: current_sub_text.append(_snake_case) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_snake_case)) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens UpperCAmelCase_ = ''''''.join(_snake_case) UpperCAmelCase_ = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: UpperCAmelCase_ = self.clean_up_tokenization(_snake_case) return clean_text else: return text def lowerCamelCase ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None): """simple docstring""" UpperCAmelCase_ = [self.sep_token_id] UpperCAmelCase_ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def lowerCamelCase ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case) if token_ids_a is not None: return ([0] * len(_snake_case)) + [1] + ([0] * len(_snake_case)) + [1, 1] return ([0] * len(_snake_case)) + [1, 1] def lowerCamelCase ( self : Any , _snake_case : List[int] , _snake_case : Optional[List[int]] = None): """simple docstring""" UpperCAmelCase_ = [self.sep_token_id] UpperCAmelCase_ = [2] if token_ids_a is None: return len(token_ids_a + sep) * [0] + cls_segment_id return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id def lowerCamelCase ( self : Tuple , _snake_case : str , _snake_case : Optional[str] = None): """simple docstring""" if not os.path.isdir(_snake_case): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""") return UpperCAmelCase_ = os.path.join( _snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(_snake_case) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , _snake_case) elif not os.path.isfile(self.vocab_file): with open(_snake_case , '''wb''') as fi: UpperCAmelCase_ = self.sp_model.serialized_model_proto() fi.write(_snake_case) return (out_vocab_file,)
358
import sys


def matrix_chain_order(array: list) -> tuple:
    """Fill the cost table ``matrix`` and the split table ``sol`` bottom-up."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution: list, i: int, j: int):
    """Print the optimal parenthesization of the chain A_i ... A_j."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
7
0
"""simple docstring""" def A (__A : List[str] , __A : List[Any] , __A : Dict , __A : List[str] ) -> Tuple: """simple docstring""" global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: UpperCAmelCase_ = mf_knapsack(i - 1 , __A , __A , __A ) else: UpperCAmelCase_ = max( mf_knapsack(i - 1 , __A , __A , __A ) , mf_knapsack(i - 1 , __A , __A , j - wt[i - 1] ) + val[i - 1] , ) UpperCAmelCase_ = val return f[i][j] def A (__A : Optional[Any] , __A : int , __A : Tuple , __A : int ) -> str: """simple docstring""" UpperCAmelCase_ = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: UpperCAmelCase_ = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: UpperCAmelCase_ = dp[i - 1][w_] return dp[n][w_], dp def A (__A : int , __A : list , __A : list ) -> List[str]: """simple docstring""" if not (isinstance(__A , (list, tuple) ) and isinstance(__A , (list, tuple) )): raise ValueError( '''Both the weights and values vectors must be either lists or tuples''' ) UpperCAmelCase_ = len(__A ) if num_items != len(__A ): UpperCAmelCase_ = ( '''The number of weights must be the same as the number of values.\n''' F"""But got {num_items} weights and {len(__A )} values""" ) raise ValueError(__A ) for i in range(__A ): if not isinstance(wt[i] , __A ): UpperCAmelCase_ = ( '''All weights must be integers but got weight of ''' F"""type {type(wt[i] )} at index {i}""" ) raise TypeError(__A ) UpperCAmelCase_ , UpperCAmelCase_ = knapsack(__A , __A , __A , __A ) UpperCAmelCase_ = set() _construct_solution(__A , __A , __A , __A , __A ) return optimal_val, example_optional_set def A (__A : list , __A : list , __A : int , __A : int , __A : set ) -> Tuple: """simple docstring""" if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(__A , __A , i - 1 , __A , __A ) else: optimal_set.add(__A ) _construct_solution(__A , __A , i - 1 , j - wt[i - 1] , __A ) if __name__ == "__main__": snake_case_ : str = [3, 2, 4, 4] snake_case_ : Optional[Any] = [4, 3, 2, 3] snake_case_ : str = 4 snake_case_ : Any = 6 snake_case_ : List[Any] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] snake_case_ : Any = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 snake_case_ : List[Any] = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("optimal_value = ", optimal_solution) print("An optimal subset corresponding to the optimal value", optimal_subset)
359
import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py snake_case_ : int = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. snake_case_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS) snake_case_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING snake_case_ : Union[str, Any] = { # used to compute the property `self.chunk_length` "EncodecConfig": ["overlap"], # used as `self.bert_model = BertModel(config, ...)` "DPRConfig": True, # not used in modeling files, but it's an important information "FSMTConfig": ["langs"], # used internally in the configuration class file "GPTNeoConfig": ["attention_types"], # used internally in the configuration class file "EsmConfig": ["is_folding_model"], # used during training (despite we don't have training script for these models yet) "Mask2FormerConfig": ["ignore_value"], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) "OneFormerConfig": ["ignore_value", "norm"], # used during preprocessing and collation, see `collating_graphormer.py` "GraphormerConfig": ["spatial_pos_max"], # used internally in the configuration class file "T5Config": ["feed_forward_proj"], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally "MT5Config": ["feed_forward_proj", "tokenizer_class"], "UMT5Config": ["feed_forward_proj", "tokenizer_class"], # used internally in the configuration class file "LongT5Config": ["feed_forward_proj"], # used internally in the configuration class file "SwitchTransformersConfig": ["feed_forward_proj"], # having default values other than `1e-5` - we can't fix them without breaking "BioGptConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "GLPNConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "SegformerConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "CvtConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "PerceiverConfig": ["layer_norm_eps"], # used internally to calculate the feature size "InformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate the feature size "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate the feature size "AutoformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate `mlp_dim` "SamVisionConfig": ["mlp_ratio"], # For (head) training, but so far not implemented "ClapAudioConfig": ["num_classes"], # Not used, but providing useful information to users "SpeechT5HifiGanConfig": ["sampling_rate"], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { "CLIPSegConfig": True, "DeformableDetrConfig": True, "DetaConfig": True, "DinatConfig": True, "DonutSwinConfig": True, "EfficientFormerConfig": True, "FSMTConfig": True, "JukeboxConfig": True, 
"LayoutLMv2Config": True, "MaskFormerSwinConfig": True, "MT5Config": True, "NatConfig": True, "OneFormerConfig": True, "PerceiverConfig": True, "RagConfig": True, "SpeechT5Config": True, "SwinConfig": True, "Swin2SRConfig": True, "Swinv2Config": True, "SwitchTransformersConfig": True, "TableTransformerConfig": True, "TapasConfig": True, "TransfoXLConfig": True, "UniSpeechConfig": True, "UniSpeechSatConfig": True, "WavLMConfig": True, "WhisperConfig": True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) "JukeboxPriorConfig": True, # TODO: @Younes (for `is_decoder`) "Pix2StructTextConfig": True, } ) def A (__A : List[Any] , __A : Optional[int] , __A : str , __A : Dict ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( F"""config.{attribute}""" in modeling_source or F"""getattr(config, \"{attribute}\"""" in modeling_source or F"""getattr(self.config, \"{attribute}\"""" in modeling_source ): UpperCAmelCase_ = True # Deal with multi-line cases elif ( re.search( RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __A , ) is not None ): UpperCAmelCase_ = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: UpperCAmelCase_ = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files UpperCAmelCase_ = [ '''bos_index''', '''eos_index''', '''pad_index''', '''unk_index''', '''mask_index''', '''image_size''', '''use_cache''', '''out_features''', '''out_indices''', ] UpperCAmelCase_ = ['''encoder_no_repeat_ngram_size'''] # Special cases to be allowed UpperCAmelCase_ = True if not attribute_used: UpperCAmelCase_ = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: UpperCAmelCase_ = True elif attribute in ["tie_word_embeddings"] and default_value is False: UpperCAmelCase_ = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: UpperCAmelCase_ = True elif attribute.endswith('''_token_id''' ): UpperCAmelCase_ = True # configuration class specific cases if not case_allowed: UpperCAmelCase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) UpperCAmelCase_ = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def A (__A : Tuple ) -> List[Any]: """simple docstring""" UpperCAmelCase_ = dict(inspect.signature(config_class.__init__ ).parameters ) UpperCAmelCase_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']] UpperCAmelCase_ = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass UpperCAmelCase_ = {} if len(config_class.attribute_map ) > 0: UpperCAmelCase_ = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files UpperCAmelCase_ = inspect.getsourcefile(__A ) 
UpperCAmelCase_ = os.path.dirname(__A ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. UpperCAmelCase_ = [os.path.join(__A , __A ) for fn in os.listdir(__A ) if fn.startswith('''modeling_''' )] # Get the source code strings UpperCAmelCase_ = [] for path in modeling_paths: if os.path.isfile(__A ): with open(__A ) as fp: modeling_sources.append(fp.read() ) UpperCAmelCase_ = [] for config_param, default_value in zip(__A , __A ): # `attributes` here is all the variant names for `config_param` UpperCAmelCase_ = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(__A , __A , __A , __A ): unused_attributes.append(attributes[0] ) return sorted(__A ) def A () -> Any: """simple docstring""" UpperCAmelCase_ = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) UpperCAmelCase_ = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda __A : inspect.isclass(__A ) and issubclass(__A , __A ) and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: UpperCAmelCase_ = check_config_attributes_being_used(__A ) if len(__A ) > 0: UpperCAmelCase_ = unused_attributes if len(__A ) > 0: UpperCAmelCase_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n''' for name, attributes in configs_with_unused_attributes.items(): error += F"""{name}: {attributes}\n""" raise ValueError(__A ) if __name__ == "__main__": check_config_attributes()
7
0
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo snake_case_ : List[Any] = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" snake_case_ : Optional[int] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" snake_case_ : str = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''') , id='''sequence'''), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''') , id='''sequence''') , id='''references'''), }) , ) def lowerCamelCase ( self : Union[str, Any] , _snake_case : List[List[List[str]]] , _snake_case : List[List[str]] , _snake_case : int = 1 , _snake_case : int = 4 , ): """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_snake_case , hypotheses=_snake_case , min_len=_snake_case , max_len=_snake_case) }
360
import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : Optional[Any] = FlaxAutoencoderKL @property def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = 4 UpperCAmelCase_ = 3 UpperCAmelCase_ = (32, 32) UpperCAmelCase_ = jax.random.PRNGKey(0) UpperCAmelCase_ = jax.random.uniform(_snake_case , ((batch_size, num_channels) + sizes)) return {"sample": image, "prng_key": prng_key} def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = { '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } UpperCAmelCase_ = self.dummy_input return init_dict, inputs_dict
7
0
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def A (__A : Tuple , __A : Any=False ) -> str: """simple docstring""" try: UpperCAmelCase_ = os.environ[key] except KeyError: # KEY isn't set, default to `default`. UpperCAmelCase_ = default else: # KEY is set, convert it to True or False. try: UpperCAmelCase_ = strtobool(__A ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"""If set, {key} must be yes or no.""" ) return _value snake_case_ : int = parse_flag_from_env("RUN_SLOW", default=False) def A (__A : List[str] ) -> List[str]: """simple docstring""" return unittest.skip('''Test was skipped''' )(__A ) def A (__A : List[str] ) -> List[str]: """simple docstring""" return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__A ) def A (__A : Dict ) -> Dict: """simple docstring""" return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__A ) def A (__A : Optional[Any] ) -> str: """simple docstring""" return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__A ) def A (__A : Tuple ) -> Any: """simple docstring""" return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__A ) def A (__A : List[str] ) -> Tuple: """simple docstring""" return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__A ) def A (__A : List[Any] ) -> List[str]: """simple docstring""" return unittest.skipUnless( is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__A ) def A (__A : Union[str, Any] ) -> int: """simple docstring""" return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__A ) def A (__A : List[str] ) -> Optional[Any]: """simple docstring""" return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__A ) def A (__A : Dict ) -> str: """simple docstring""" return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__A ) def A (__A : Union[str, Any] ) -> str: """simple docstring""" return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__A ) def A (__A : Dict ) -> Dict: """simple docstring""" return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__A ) def A (__A : str ) -> Dict: """simple docstring""" return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__A ) def A (__A : List[str] ) -> Optional[int]: """simple docstring""" return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__A ) def A (__A : List[str] ) -> List[str]: """simple docstring""" return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__A ) def A (__A : Optional[int] ) -> List[Any]: """simple docstring""" return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version 
>= 1.12.0''' )(__A ) def A (__A : List[Any]=None , __A : Dict=None ) -> Union[str, Any]: """simple docstring""" if test_case is None: return partial(__A , version=__A ) return unittest.skipUnless(is_torch_version('''>=''' , __A ) , F"""test requires torch version >= {version}""" )(__A ) def A (__A : Optional[Any] ) -> Optional[int]: """simple docstring""" return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__A ) def A (__A : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__A ) def A (__A : List[str] ) -> List[str]: """simple docstring""" return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__A ) snake_case_ : Tuple = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def A (__A : Union[str, Any] ) -> Dict: """simple docstring""" return unittest.skipUnless( _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__A ) class __snake_case ( unittest.TestCase ): UpperCAmelCase__ : Union[str, Any] = True @classmethod def lowerCamelCase ( cls : Optional[Any]): """simple docstring""" UpperCAmelCase_ = tempfile.mkdtemp() @classmethod def lowerCamelCase ( cls : List[Any]): """simple docstring""" if os.path.exists(cls.tmpdir): shutil.rmtree(cls.tmpdir) def lowerCamelCase ( self : Dict): """simple docstring""" if self.clear_on_setup: for path in Path(self.tmpdir).glob('''**/*'''): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(_snake_case) class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : List[str]): """simple docstring""" super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Dict , _snake_case : Union[mock.Mock, List[mock.Mock]]): """simple docstring""" UpperCAmelCase_ = mocks if isinstance(_snake_case , (tuple, list)) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop) def A (__A : Union[str, Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ = AcceleratorState() UpperCAmelCase_ = tensor[None].clone().to(state.device ) UpperCAmelCase_ = gather(__A ).cpu() UpperCAmelCase_ = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , __A ): return False return True class __snake_case : def __init__( self : int , _snake_case : Any , _snake_case : Tuple , _snake_case : Any): """simple docstring""" UpperCAmelCase_ = returncode UpperCAmelCase_ = stdout UpperCAmelCase_ = stderr async def A (__A : Any , __A : Any ) -> int: """simple docstring""" while True: UpperCAmelCase_ = await stream.readline() if line: callback(__A ) else: break async def A (__A : List[str] , __A : str=None , __A : str=None , __A : str=None , __A : int=False , __A : List[Any]=False ) -> _RunOutput: """simple docstring""" if echo: print('''\nRunning: ''' , ''' '''.join(__A ) ) UpperCAmelCase_ = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__A , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. 
The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) UpperCAmelCase_ = [] UpperCAmelCase_ = [] def tee(__A : int , __A : Dict , __A : Dict , __A : List[Any]="" ): UpperCAmelCase_ = line.decode('''utf-8''' ).rstrip() sink.append(__A ) if not quiet: print(__A , __A , file=__A ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda __A : tee(__A , __A , sys.stdout , label='''stdout:''' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda __A : tee(__A , __A , sys.stderr , label='''stderr:''' ) ) ), ] , timeout=__A , ) return _RunOutput(await p.wait() , __A , __A ) def A (__A : int , __A : List[str]=None , __A : Dict=None , __A : int=180 , __A : Union[str, Any]=False , __A : Union[str, Any]=True ) -> _RunOutput: """simple docstring""" UpperCAmelCase_ = asyncio.get_event_loop() UpperCAmelCase_ = loop.run_until_complete( _stream_subprocess(__A , env=__A , stdin=__A , timeout=__A , quiet=__A , echo=__A ) ) UpperCAmelCase_ = ''' '''.join(__A ) if result.returncode > 0: UpperCAmelCase_ = '''\n'''.join(result.stderr ) raise RuntimeError( F"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" F"""The combined stderr from workers follows:\n{stderr}""" ) return result class __snake_case ( a ): pass def A (__A : List[str] , __A : List[Any]=False ) -> Tuple: """simple docstring""" try: UpperCAmelCase_ = subprocess.check_output(__A , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(__A , '''decode''' ): UpperCAmelCase_ = output.decode('''utf-8''' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"""Command `{" ".join(__A )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
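The testing helpers above hinge on parse_flag_from_env, which reads an environment variable and coerces it with strtobool so that, for example, RUN_SLOW=yes enables the slow tests. A self-contained sketch of that pattern, with a made-up variable name for illustration:

import os
from distutils.util import strtobool

def parse_flag_from_env(key, default=False):
    # mirrors the pattern above: unset -> default, else strtobool accepts yes/no, true/false, 1/0
    value = os.environ.get(key)
    if value is None:
        return default
    return bool(strtobool(value))

os.environ["MY_FLAG"] = "yes"  # MY_FLAG is a hypothetical variable for this sketch
assert parse_flag_from_env("MY_FLAG") is True
assert parse_flag_from_env("SOME_UNSET_FLAG") is False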
361
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 snake_case_ : List[str] = { "return_dict": False, "output_hidden_states": True, "output_attentions": True, "torchscript": True, "torch_dtype": "float16", "use_bfloat16": True, "tf_legacy_loss": True, "pruned_heads": {"a": 1}, "tie_word_embeddings": False, "is_decoder": True, "cross_attention_hidden_size": 128, "add_cross_attention": True, "tie_encoder_decoder": True, "max_length": 50, "min_length": 3, "do_sample": True, "early_stopping": True, "num_beams": 3, "num_beam_groups": 3, "diversity_penalty": 0.5, "temperature": 2.0, "top_k": 10, "top_p": 0.7, "typical_p": 0.2, "repetition_penalty": 0.8, "length_penalty": 0.8, "no_repeat_ngram_size": 5, "encoder_no_repeat_ngram_size": 5, "bad_words_ids": [1, 2, 3], "num_return_sequences": 3, "chunk_size_feed_forward": 5, "output_scores": True, "return_dict_in_generate": True, "forced_bos_token_id": 2, "forced_eos_token_id": 3, "remove_invalid_values": True, "architectures": ["BertModel"], "finetuning_task": "translation", "id2label": {0: "label"}, "label2id": {"label": "0"}, "tokenizer_class": "BertTokenizerFast", "prefix": "prefix", "bos_token_id": 6, "pad_token_id": 7, "eos_token_id": 8, "sep_token_id": 9, "decoder_start_token_id": 10, "exponential_decay_length_penalty": (5, 1.01), "suppress_tokens": [0, 1], "begin_suppress_tokens": 2, "task_specific_params": {"translation": "some_params"}, "problem_type": "regression", } @is_staging_test class __snake_case ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls : Optional[Any]): """simple docstring""" UpperCAmelCase_ = TOKEN HfFolder.save_token(_snake_case) @classmethod def lowerCamelCase ( cls : List[str]): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-config''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-config''') except HTTPError: pass def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37) config.push_to_hub('''test-config''' , use_auth_token=self._token) UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) # Reset repo delete_repo(token=self._token , repo_id='''test-config''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_snake_case , repo_id='''test-config''' , push_to_hub=_snake_case , use_auth_token=self._token) UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = BertConfig( vocab_size=99 , 
hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37) config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token) UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-config-org''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( _snake_case , repo_id='''valid_org/test-config-org''' , push_to_hub=_snake_case , use_auth_token=self._token) UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" CustomConfig.register_for_auto_class() UpperCAmelCase_ = CustomConfig(attribute=42) config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''}) UpperCAmelCase_ = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=_snake_case) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''') self.assertEqual(new_config.attribute , 42) class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated UpperCAmelCase_ = c.n_embd + 1 # int UpperCAmelCase_ = c.resid_pdrop + 1.0 # float UpperCAmelCase_ = not c.scale_attn_weights # bool UpperCAmelCase_ = c.summary_type + '''foo''' # str c.update_from_string( F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""") self.assertEqual(_snake_case , c.n_embd , '''mismatch for key: n_embd''') self.assertEqual(_snake_case , c.resid_pdrop , '''mismatch for key: resid_pdrop''') self.assertEqual(_snake_case , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''') self.assertEqual(_snake_case , c.summary_type , '''mismatch for key: summary_type''') def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = PretrainedConfig() UpperCAmelCase_ = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( _snake_case , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version''']) UpperCAmelCase_ = [key for key, value in config_common_kwargs.items() if value == getattr(_snake_case , _snake_case)] if len(_snake_case) > 0: raise ValueError( '''The following keys are set with the default values in''' ''' `test_configuration_common.config_common_kwargs` pick another value for them:''' F""" {", ".join(_snake_case)}.""") def lowerCamelCase ( self : str): """simple docstring""" with self.assertRaises(_snake_case): # config is in subfolder, the following should not work without specifying the subfolder UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''') UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''') self.assertIsNotNone(_snake_case) def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = mock.Mock() UpperCAmelCase_ = 500 UpperCAmelCase_ = {} UpperCAmelCase_ = HTTPError UpperCAmelCase_ = {} # Download this model to make sure it's in the cache. UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''') # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_snake_case) as mock_head: UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''') # This check we did call the fake head request mock_head.assert_called() def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = BertConfig.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''') def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = AutoConfig.from_pretrained('''bert-base-cased''') UpperCAmelCase_ = ['''config.4.0.0.json'''] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(_snake_case) UpperCAmelCase_ = 2 json.dump(configuration.to_dict() , open(os.path.join(_snake_case , '''config.4.0.0.json''') , '''w''')) # This should pick the new configuration file as the version of Transformers is > 4.0.0 UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) self.assertEqual(new_configuration.hidden_size , 2) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 UpperCAmelCase_ = ['''config.42.0.0.json'''] UpperCAmelCase_ = 768 configuration.save_pretrained(_snake_case) shutil.move(os.path.join(_snake_case , '''config.4.0.0.json''') , os.path.join(_snake_case , '''config.42.0.0.json''')) UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) self.assertEqual(new_configuration.hidden_size , 768) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''hf-internal-testing/test-two-configs''' import transformers as new_transformers UpperCAmelCase_ = '''v4.0.0''' UpperCAmelCase_ , UpperCAmelCase_ = new_transformers.models.auto.AutoConfig.from_pretrained( _snake_case , return_unused_kwargs=_snake_case) self.assertEqual(new_configuration.hidden_size , 2) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(_snake_case , {}) # Testing an older version by monkey-patching the version in the module it's used. 
import transformers as old_transformers UpperCAmelCase_ = '''v3.0.0''' UpperCAmelCase_ = old_transformers.models.auto.AutoConfig.from_pretrained(_snake_case) self.assertEqual(old_configuration.hidden_size , 768)
7
0
def A (__A : int = 1000000 ) -> int: """simple docstring""" UpperCAmelCase_ = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , __A ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
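The sieve above works because phi[i] keeps its initial value i - 1 exactly when i is prime (no smaller prime's pass has touched it), and because phi[j] -= phi[j] // p applies the factor (1 - 1/p) once per distinct prime p dividing j; the returned sum of phi over 2..limit is the count of reduced proper fractions with denominator at most limit (Project Euler 72). A small cross-check against the gcd-based definition, as an independent sketch:

from math import gcd

def phi_direct(n):
    # Euler's totient by definition: integers in [1, n] coprime to n
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)

def phi_sieve(limit):
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: no earlier pass touched it
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i  # applies the factor (1 - 1/i)
    return phi

sieved = phi_sieve(200)
assert all(sieved[n] == phi_direct(n) for n in range(2, 201))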
362
from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass snake_case_ : List[Any] = (3, 9, -11, 0, 7, 5, 1, -1) snake_case_ : str = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class __snake_case : UpperCAmelCase__ : int UpperCAmelCase__ : Node | None class __snake_case : def __init__( self : Optional[int] , _snake_case : Iterable[int]): """simple docstring""" UpperCAmelCase_ = None for i in sorted(_snake_case , reverse=_snake_case): UpperCAmelCase_ = Node(_snake_case , self.head) def __iter__( self : Dict): """simple docstring""" UpperCAmelCase_ = self.head while node: yield node.data UpperCAmelCase_ = node.next_node def __len__( self : int): """simple docstring""" return sum(1 for _ in self) def __str__( self : Optional[Any]): """simple docstring""" return " -> ".join([str(_snake_case) for node in self]) def A (__A : SortedLinkedList , __A : SortedLinkedList ) -> SortedLinkedList: """simple docstring""" return SortedLinkedList(list(__A ) + list(__A ) ) if __name__ == "__main__": import doctest doctest.testmod() snake_case_ : Union[str, Any] = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
7
0
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __snake_case ( a ): UpperCAmelCase__ : Optional[Any] = ['''image_processor''', '''tokenizer'''] UpperCAmelCase__ : int = '''BridgeTowerImageProcessor''' UpperCAmelCase__ : Tuple = ('''RobertaTokenizer''', '''RobertaTokenizerFast''') def __init__( self : Optional[Any] , _snake_case : Optional[Any] , _snake_case : str): """simple docstring""" super().__init__(_snake_case , _snake_case) def __call__( self : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _snake_case : bool = True , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Union[bool, str, TruncationStrategy] = None , _snake_case : Optional[int] = None , _snake_case : int = 0 , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = True , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : str , ): """simple docstring""" UpperCAmelCase_ = self.tokenizer( text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_token_type_ids=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , ) # add pixel_values + pixel_mask UpperCAmelCase_ = self.image_processor( _snake_case , return_tensors=_snake_case , do_normalize=_snake_case , do_center_crop=_snake_case , **_snake_case) encoding.update(_snake_case) return encoding def lowerCamelCase ( self : List[str] , *_snake_case : List[Any] , **_snake_case : Optional[int]): """simple docstring""" return self.tokenizer.batch_decode(*_snake_case , **_snake_case) def lowerCamelCase ( self : str , *_snake_case : List[Any] , **_snake_case : str): """simple docstring""" return self.tokenizer.decode(*_snake_case , **_snake_case) @property def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = self.tokenizer.model_input_names UpperCAmelCase_ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
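For context, a processor like the one above is typically driven end to end as below. This usage sketch is illustrative only: the checkpoint id and image URL are assumptions, not taken from the file itself.

# illustrative only: checkpoint id and image URL are assumptions
import requests
from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
# images come first, text second, matching the __call__ signature above
encoding = processor(image, "two cats lying on a couch", return_tensors="pt")
print(sorted(encoding.keys()))  # e.g. attention_mask, input_ids, pixel_mask, pixel_values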
363
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig snake_case_ : Union[str, Any] = logging.get_logger(__name__) class __snake_case : def __init__( self : int , _snake_case : List[Any] , _snake_case : Tuple): """simple docstring""" UpperCAmelCase_ = question_encoder UpperCAmelCase_ = generator UpperCAmelCase_ = self.question_encoder def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int]): """simple docstring""" if os.path.isfile(_snake_case): raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""") os.makedirs(_snake_case , exist_ok=_snake_case) UpperCAmelCase_ = os.path.join(_snake_case , '''question_encoder_tokenizer''') UpperCAmelCase_ = os.path.join(_snake_case , '''generator_tokenizer''') self.question_encoder.save_pretrained(_snake_case) self.generator.save_pretrained(_snake_case) @classmethod def lowerCamelCase ( cls : Optional[Any] , _snake_case : Optional[Any] , **_snake_case : Optional[int]): """simple docstring""" from ..auto.tokenization_auto import AutoTokenizer UpperCAmelCase_ = kwargs.pop('''config''' , _snake_case) if config is None: UpperCAmelCase_ = RagConfig.from_pretrained(_snake_case) UpperCAmelCase_ = AutoTokenizer.from_pretrained( _snake_case , config=config.question_encoder , subfolder='''question_encoder_tokenizer''') UpperCAmelCase_ = AutoTokenizer.from_pretrained( _snake_case , config=config.generator , subfolder='''generator_tokenizer''') return cls(question_encoder=_snake_case , generator=_snake_case) def __call__( self : List[Any] , *_snake_case : List[str] , **_snake_case : List[Any]): """simple docstring""" return self.current_tokenizer(*_snake_case , **_snake_case) def lowerCamelCase ( self : List[Any] , *_snake_case : str , **_snake_case : Union[str, Any]): """simple docstring""" return self.generator.batch_decode(*_snake_case , **_snake_case) def lowerCamelCase ( self : str , *_snake_case : Optional[int] , **_snake_case : Any): """simple docstring""" return self.generator.decode(*_snake_case , **_snake_case) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = self.question_encoder def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = self.generator def lowerCamelCase ( self : Optional[Any] , _snake_case : List[str] , _snake_case : Optional[List[str]] = None , _snake_case : Optional[int] = None , _snake_case : Optional[int] = None , _snake_case : str = "longest" , _snake_case : str = None , _snake_case : bool = True , **_snake_case : Optional[int] , ): """simple docstring""" warnings.warn( '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ''' '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ''' '''context manager to prepare your targets. 
See the documentation of your specific tokenizer for more ''' '''details''' , _snake_case , ) if max_length is None: UpperCAmelCase_ = self.current_tokenizer.model_max_length UpperCAmelCase_ = self( _snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , max_length=_snake_case , padding=_snake_case , truncation=_snake_case , **_snake_case , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: UpperCAmelCase_ = self.current_tokenizer.model_max_length UpperCAmelCase_ = self( text_target=_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , **_snake_case , ) UpperCAmelCase_ = labels['''input_ids'''] return model_inputs
7
0
from math import sqrt def A (__A : int ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(sqrt(__A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A (__A : int = 10001 ) -> int: """simple docstring""" UpperCAmelCase_ = 0 UpperCAmelCase_ = 1 while count != nth and number < 3: number += 1 if is_prime(__A ): count += 1 while count != nth: number += 2 if is_prime(__A ): count += 1 return number if __name__ == "__main__": print(f"{solution() = }")
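The primality test above rests on two standard facts: a composite n must have a divisor no larger than sqrt(n), and every prime greater than 3 has the form 6k +/- 1 (the other residues mod 6 are divisible by 2 or 3). A quick cross-check of that test against naive trial division, as an independent sketch:

from math import isqrt

def is_prime_naive(n):
    if n < 2:
        return False
    return all(n % d for d in range(2, n))

def is_prime_6k(n):
    # mirror of the 6k +/- 1 test above
    if 1 < n < 4:
        return True
    if n < 2 or n % 2 == 0 or n % 3 == 0:
        return False
    for i in range(5, isqrt(n) + 1, 6):
        if n % i == 0 or n % (i + 2) == 0:
            return False
    return True

assert all(is_prime_6k(n) == is_prime_naive(n) for n in range(200))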
364
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __snake_case ( unittest.TestCase ): @slow def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-base''') UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) # The dog is cute and lives in the garden house UpperCAmelCase_ = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim UpperCAmelCase_ = torch.tensor( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach() self.assertEqual(output.shape , _snake_case) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3)) @slow def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-large''') UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) # The dog is cute and lives in the garden house UpperCAmelCase_ = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim UpperCAmelCase_ = torch.tensor( [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach() self.assertEqual(output.shape , _snake_case) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3))
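The integration test above pins exact token ids and hidden-state slices; the same forward pass can be reproduced without the hard-coded tensors as sketched below (tokenization is left to the tokenizer here, so the sequence length need not be the 12 ids used in the test):

import torch
from transformers import AutoTokenizer, XLMRobertaModel

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
inputs = tokenizer("The dog is cute and lives in the garden house", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state
print(hidden.shape)  # (1, sequence_length, 768) for the base checkpoint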
7
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) snake_case_ : int = { "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Optional[int] = ["MobileViTFeatureExtractor"] snake_case_ : Dict = ["MobileViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Optional[int] = [ "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileViTForImageClassification", "MobileViTForSemanticSegmentation", "MobileViTModel", "MobileViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Any = [ "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileViTForImageClassification", "TFMobileViTForSemanticSegmentation", "TFMobileViTModel", "TFMobileViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys snake_case_ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
365
from maths.prime_factors import prime_factors def A (__A : int ) -> int: """simple docstring""" if not isinstance(__A , __A ): UpperCAmelCase_ = F"""Input value of [number={number}] must be an integer""" raise TypeError(__A ) if number < 1: raise ValueError('''Input must be a positive integer''' ) return -1 if len(prime_factors(__A ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
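The sign computed above is (-1) raised to the number of prime factors counted with multiplicity, i.e. the Liouville function lambda(n), assuming prime_factors returns the factorization with multiplicity as the upstream TheAlgorithms helper does. A self-contained sketch with its own factorization, checked against the first ten lambda values:

def prime_factors_mult(n):
    # prime factorization with multiplicity, e.g. 12 -> [2, 2, 3]
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors

def liouville(n):
    return -1 if len(prime_factors_mult(n)) % 2 else 1

assert [liouville(n) for n in range(1, 11)] == [1, -1, -1, 1, -1, 1, -1, -1, 1, 1]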
7
0
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging snake_case_ : Any = logging.get_logger(__name__) snake_case_ : List[str] = { "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json", # See all Marian models at https://huggingface.co/models?filter=marian } class __snake_case ( a ): UpperCAmelCase__ : Tuple = '''marian''' UpperCAmelCase__ : List[Any] = ['''past_key_values'''] UpperCAmelCase__ : str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : int , _snake_case : List[str]=58101 , _snake_case : List[Any]=None , _snake_case : List[Any]=1024 , _snake_case : Union[str, Any]=12 , _snake_case : int=4096 , _snake_case : Optional[Any]=16 , _snake_case : Union[str, Any]=12 , _snake_case : Optional[int]=4096 , _snake_case : int=16 , _snake_case : Tuple=0.0 , _snake_case : Optional[int]=0.0 , _snake_case : Optional[Any]=True , _snake_case : Union[str, Any]=True , _snake_case : Any="gelu" , _snake_case : Optional[Any]=1024 , _snake_case : Optional[Any]=0.1 , _snake_case : Union[str, Any]=0.0 , _snake_case : List[Any]=0.0 , _snake_case : Tuple=0.0_2 , _snake_case : str=58100 , _snake_case : Union[str, Any]=False , _snake_case : Any=58100 , _snake_case : Dict=0 , _snake_case : Union[str, Any]=0 , _snake_case : Any=True , **_snake_case : Any , ): """simple docstring""" UpperCAmelCase_ = vocab_size UpperCAmelCase_ = decoder_vocab_size or vocab_size UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = d_model UpperCAmelCase_ = encoder_ffn_dim UpperCAmelCase_ = encoder_layers UpperCAmelCase_ = encoder_attention_heads UpperCAmelCase_ = decoder_ffn_dim UpperCAmelCase_ = decoder_layers UpperCAmelCase_ = decoder_attention_heads UpperCAmelCase_ = dropout UpperCAmelCase_ = attention_dropout UpperCAmelCase_ = activation_dropout UpperCAmelCase_ = activation_function UpperCAmelCase_ = init_std UpperCAmelCase_ = encoder_layerdrop UpperCAmelCase_ = decoder_layerdrop UpperCAmelCase_ = use_cache UpperCAmelCase_ = encoder_layers UpperCAmelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True UpperCAmelCase_ = share_encoder_decoder_embeddings super().__init__( pad_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , decoder_start_token_id=_snake_case , forced_eos_token_id=_snake_case , **_snake_case , ) class __snake_case ( a ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase_ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ]) if self.use_past: UpperCAmelCase_ = {0: '''batch'''} UpperCAmelCase_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: UpperCAmelCase_ = {0: '''batch''', 1: '''decoder_sequence'''} UpperCAmelCase_ = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(_snake_case , direction='''inputs''') elif self.task == "causal-lm": # TODO: figure this case out. 
UpperCAmelCase_ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ]) if self.use_past: UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers for i in range(_snake_case): UpperCAmelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''} UpperCAmelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''} else: UpperCAmelCase_ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ]) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def lowerCamelCase ( self : List[Any]): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase_ = super().outputs else: UpperCAmelCase_ = super(_snake_case , self).outputs if self.use_past: UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers for i in range(_snake_case): UpperCAmelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''} UpperCAmelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def lowerCamelCase ( self : Tuple , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , ): """simple docstring""" UpperCAmelCase_ = self._generate_dummy_inputs_for_encoder_and_decoder( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case) # Generate decoder inputs UpperCAmelCase_ = seq_length if not self.use_past else 1 UpperCAmelCase_ = self._generate_dummy_inputs_for_encoder_and_decoder( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case) UpperCAmelCase_ = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} UpperCAmelCase_ = dict(**_snake_case , **_snake_case) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''') else: import torch UpperCAmelCase_ , UpperCAmelCase_ = common_inputs['''input_ids'''].shape UpperCAmelCase_ = common_inputs['''decoder_input_ids'''].shape[1] UpperCAmelCase_ , UpperCAmelCase_ = self.num_attention_heads UpperCAmelCase_ = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCAmelCase_ = decoder_seq_length + 3 UpperCAmelCase_ = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) UpperCAmelCase_ = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(_snake_case , _snake_case)] , dim=1) UpperCAmelCase_ = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers UpperCAmelCase_ = min(_snake_case , _snake_case) UpperCAmelCase_ = max(_snake_case , _snake_case) - min_num_layers UpperCAmelCase_ = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(_snake_case): common_inputs["past_key_values"].append( ( torch.zeros(_snake_case), torch.zeros(_snake_case), torch.zeros(_snake_case), torch.zeros(_snake_case), )) # TODO: test this. 
UpperCAmelCase_ = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(_snake_case , _snake_case): common_inputs["past_key_values"].append((torch.zeros(_snake_case), torch.zeros(_snake_case))) return common_inputs def lowerCamelCase ( self : Any , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , ): """simple docstring""" UpperCAmelCase_ = self._generate_dummy_inputs_for_encoder_and_decoder( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''') else: import torch UpperCAmelCase_ , UpperCAmelCase_ = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values UpperCAmelCase_ = seqlen + 2 UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers UpperCAmelCase_ , UpperCAmelCase_ = self.num_attention_heads UpperCAmelCase_ = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCAmelCase_ = common_inputs['''attention_mask'''].dtype UpperCAmelCase_ = torch.cat( [common_inputs['''attention_mask'''], torch.ones(_snake_case , _snake_case , dtype=_snake_case)] , dim=1) UpperCAmelCase_ = [ (torch.zeros(_snake_case), torch.zeros(_snake_case)) for _ in range(_snake_case) ] return common_inputs def lowerCamelCase ( self : Dict , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , ): """simple docstring""" UpperCAmelCase_ = compute_effective_axis_dimension( _snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase_ = tokenizer.num_special_tokens_to_add(_snake_case) UpperCAmelCase_ = compute_effective_axis_dimension( _snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_snake_case) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase_ = [''' '''.join([tokenizer.unk_token]) * seq_length] * batch_size UpperCAmelCase_ = dict(tokenizer(_snake_case , return_tensors=_snake_case)) return common_inputs def lowerCamelCase ( self : List[Any] , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case) else: UpperCAmelCase_ = self._generate_dummy_inputs_for_causal_lm( _snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case) return common_inputs def lowerCamelCase ( self : Any , _snake_case : Tuple , _snake_case : Any , _snake_case : Optional[int] , _snake_case : List[Any]): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase_ = super()._flatten_past_key_values_(_snake_case , _snake_case , _snake_case , _snake_case) else: UpperCAmelCase_ = super(_snake_case , self)._flatten_past_key_values_( _snake_case , _snake_case , _snake_case , _snake_case) @property def lowerCamelCase ( self : Optional[int]): """simple 
docstring""" return 1e-4
366
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any]): """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']): UpperCAmelCase_ = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_snake_case) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''') def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) # set architectures equal to `None` UpperCAmelCase_ = None UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Dict): """simple 
docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tinier_bart''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tinier_bart''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ = PyTorchBenchmarkArguments( 
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) benchmark.run() self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists()) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_snake_case : Tuple): self.assertTrue(hasattr(_snake_case , '''sequential''')) self.assertTrue(hasattr(_snake_case , '''cumulative''')) self.assertTrue(hasattr(_snake_case , '''current''')) self.assertTrue(hasattr(_snake_case , '''total''')) with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() _check_summary_is_not_empty(result.inference_summary) _check_summary_is_not_empty(result.train_summary) self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
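# The tests above all share one pattern; stripped of the unittest harness, a
# minimal standalone benchmark run (model id and sizes are illustrative) is:
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
benchmark = PyTorchBenchmark(args)
results = benchmark.run()
print(results.time_inference_result)
print(results.memory_inference_result)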
7
0
import inspect from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel, VQModel from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __snake_case ( a ): def __init__( self : Any , _snake_case : VQModel , _snake_case : UNetaDModel , _snake_case : DDIMScheduler): """simple docstring""" super().__init__() self.register_modules(vqvae=_snake_case , unet=_snake_case , scheduler=_snake_case) @torch.no_grad() def __call__( self : List[str] , _snake_case : int = 1 , _snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _snake_case : float = 0.0 , _snake_case : int = 50 , _snake_case : Optional[str] = "pil" , _snake_case : bool = True , **_snake_case : str , ): """simple docstring""" UpperCAmelCase_ = randn_tensor( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=_snake_case , ) UpperCAmelCase_ = latents.to(self.device) # scale the initial noise by the standard deviation required by the scheduler UpperCAmelCase_ = latents * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(_snake_case) # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature UpperCAmelCase_ = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys()) UpperCAmelCase_ = {} if accepts_eta: UpperCAmelCase_ = eta for t in self.progress_bar(self.scheduler.timesteps): UpperCAmelCase_ = self.scheduler.scale_model_input(_snake_case , _snake_case) # predict the noise residual UpperCAmelCase_ = self.unet(_snake_case , _snake_case).sample # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ = self.scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample # decode the image latents with the VAE UpperCAmelCase_ = self.vqvae.decode(_snake_case).sample UpperCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1) UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": UpperCAmelCase_ = self.numpy_to_pil(_snake_case) if not return_dict: return (image,) return ImagePipelineOutput(images=_snake_case)
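# A hedged usage sketch for the unconditional pipeline above, assuming it
# corresponds to diffusers' LDMPipeline; "CompVis/ldm-celebahq-256" is used
# as an example public checkpoint.
import torch
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# Arguments mirror the __call__ signature above: batch size, DDIM steps,
# eta for the DDIM scheduler, and PIL output.
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0, output_type="pil").images[0]
image.save("ldm_sample.png")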
367
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def A (__A : BertModel , __A : str , __A : str ) -> int: """simple docstring""" UpperCAmelCase_ = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''') UpperCAmelCase_ = ( ('''layer.''', '''layer_'''), ('''word_embeddings.weight''', '''word_embeddings'''), ('''position_embeddings.weight''', '''position_embeddings'''), ('''token_type_embeddings.weight''', '''token_type_embeddings'''), ('''.''', '''/'''), ('''LayerNorm/weight''', '''LayerNorm/gamma'''), ('''LayerNorm/bias''', '''LayerNorm/beta'''), ('''weight''', '''kernel'''), ) if not os.path.isdir(__A ): os.makedirs(__A ) UpperCAmelCase_ = model.state_dict() def to_tf_var_name(__A : str ): for patt, repl in iter(__A ): UpperCAmelCase_ = name.replace(__A , __A ) return F"""bert/{name}""" def create_tf_var(__A : np.ndarray , __A : str , __A : tf.Session ): UpperCAmelCase_ = tf.dtypes.as_dtype(tensor.dtype ) UpperCAmelCase_ = tf.get_variable(dtype=__A , shape=tensor.shape , name=__A , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__A ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: UpperCAmelCase_ = to_tf_var_name(__A ) UpperCAmelCase_ = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): UpperCAmelCase_ = torch_tensor.T UpperCAmelCase_ = create_tf_var(tensor=__A , name=__A , session=__A ) tf.keras.backend.set_value(__A , __A ) UpperCAmelCase_ = session.run(__A ) print(F"""Successfully created {tf_name}: {np.allclose(__A , __A )}""" ) UpperCAmelCase_ = tf.train.Saver(tf.trainable_variables() ) saver.save(__A , os.path.join(__A , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) ) def A (__A : Any=None ) -> str: """simple docstring""" UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--model_name''' , type=__A , required=__A , help='''model name e.g. bert-base-uncased''' ) parser.add_argument( '''--cache_dir''' , type=__A , default=__A , required=__A , help='''Directory containing pytorch model''' ) parser.add_argument('''--pytorch_model_path''' , type=__A , required=__A , help='''/path/to/<pytorch-model-name>.bin''' ) parser.add_argument('''--tf_cache_dir''' , type=__A , required=__A , help='''Directory in which to save tensorflow model''' ) UpperCAmelCase_ = parser.parse_args(__A ) UpperCAmelCase_ = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__A , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
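# Because the entry point forwards its argument list to parse_args, the
# converter can be driven programmatically. Note the __main__ guard above
# calls `main()` while the function is defined as `A`; the call below assumes
# the original name `main`, and the paths are placeholders.
main(
    [
        "--model_name", "bert-base-uncased",
        "--pytorch_model_path", "/path/to/pytorch_model.bin",
        "--tf_cache_dir", "/path/to/tf_checkpoints",
    ]
)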
7
0
import string import numpy def A (__A : int , __A : int ) -> int: """simple docstring""" return b if a == 0 else greatest_common_divisor(b % a , __A ) class __snake_case : UpperCAmelCase__ : Union[str, Any] = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) UpperCAmelCase__ : Union[str, Any] = numpy.vectorize(lambda a : x % 3_6 ) UpperCAmelCase__ : List[str] = numpy.vectorize(a ) def __init__( self : str , _snake_case : numpy.ndarray): """simple docstring""" UpperCAmelCase_ = self.modulus(_snake_case) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key UpperCAmelCase_ = encrypt_key.shape[0] def lowerCamelCase ( self : Tuple , _snake_case : str): """simple docstring""" return self.key_string.index(_snake_case) def lowerCamelCase ( self : str , _snake_case : int): """simple docstring""" return self.key_string[round(_snake_case)] def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = round(numpy.linalg.det(self.encrypt_key)) if det < 0: UpperCAmelCase_ = det % len(self.key_string) UpperCAmelCase_ = len(self.key_string) if greatest_common_divisor(_snake_case , len(self.key_string)) != 1: UpperCAmelCase_ = ( F"""determinant modular {req_l} of encryption key({det}) """ F"""is not co prime w.r.t {req_l}.\nTry another key.""" ) raise ValueError(_snake_case) def lowerCamelCase ( self : Optional[int] , _snake_case : str): """simple docstring""" UpperCAmelCase_ = [char for char in text.upper() if char in self.key_string] UpperCAmelCase_ = chars[-1] while len(_snake_case) % self.break_key != 0: chars.append(_snake_case) return "".join(_snake_case) def lowerCamelCase ( self : List[Any] , _snake_case : str): """simple docstring""" UpperCAmelCase_ = self.process_text(text.upper()) UpperCAmelCase_ = '''''' for i in range(0 , len(_snake_case) - self.break_key + 1 , self.break_key): UpperCAmelCase_ = text[i : i + self.break_key] UpperCAmelCase_ = [self.replace_letters(_snake_case) for char in batch] UpperCAmelCase_ = numpy.array([vec]).T UpperCAmelCase_ = self.modulus(self.encrypt_key.dot(_snake_case)).T.tolist()[ 0 ] UpperCAmelCase_ = ''''''.join( self.replace_digits(_snake_case) for num in batch_encrypted) encrypted += encrypted_batch return encrypted def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = round(numpy.linalg.det(self.encrypt_key)) if det < 0: UpperCAmelCase_ = det % len(self.key_string) UpperCAmelCase_ = None for i in range(len(self.key_string)): if (det * i) % len(self.key_string) == 1: UpperCAmelCase_ = i break UpperCAmelCase_ = ( det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key) ) return self.to_int(self.modulus(_snake_case)) def lowerCamelCase ( self : Dict , _snake_case : str): """simple docstring""" UpperCAmelCase_ = self.make_decrypt_key() UpperCAmelCase_ = self.process_text(text.upper()) UpperCAmelCase_ = '''''' for i in range(0 , len(_snake_case) - self.break_key + 1 , self.break_key): UpperCAmelCase_ = text[i : i + self.break_key] UpperCAmelCase_ = [self.replace_letters(_snake_case) for char in batch] UpperCAmelCase_ = numpy.array([vec]).T UpperCAmelCase_ = self.modulus(decrypt_key.dot(_snake_case)).T.tolist()[0] UpperCAmelCase_ = ''''''.join( self.replace_digits(_snake_case) for num in batch_decrypted) decrypted += decrypted_batch return decrypted def A () -> None: """simple docstring""" UpperCAmelCase_ = int(input('''Enter the order 
of the encryption key: ''' ) ) UpperCAmelCase_ = [] print('''Enter each row of the encryption key with space separated integers''' ) for _ in range(__A ): UpperCAmelCase_ = [int(__A ) for x in input().split()] hill_matrix.append(__A ) UpperCAmelCase_ = HillCipher(numpy.array(__A ) ) print('''Would you like to encrypt or decrypt some text? (1 or 2)''' ) UpperCAmelCase_ = input('''\n1. Encrypt\n2. Decrypt\n''' ) if option == "1": UpperCAmelCase_ = input('''What text would you like to encrypt?: ''' ) print('''Your encrypted text is:''' ) print(hc.encrypt(__A ) ) elif option == "2": UpperCAmelCase_ = input('''What text would you like to decrypt?: ''' ) print('''Your decrypted text is:''' ) print(hc.decrypt(__A ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
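# A round-trip sketch for the cipher above, assuming the class is importable
# as HillCipher (the name used in the interactive `main`). The 2x2 key
# [[2, 5], [1, 6]] has determinant 7, coprime to 36, so it passes the
# determinant check.
import numpy

hc = HillCipher(numpy.array([[2, 5], [1, 6]]))

ciphertext = hc.encrypt("HELLO")
plaintext = hc.decrypt(ciphertext)

# process_text pads to a multiple of the key order by repeating the last
# character, so the round trip returns the padded plaintext ("HELLOO").
assert plaintext.startswith("HELLO")
print(ciphertext, plaintext)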
368
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __snake_case ( unittest.TestCase ): def __init__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict=3 , _snake_case : Dict=32 , _snake_case : List[str]=3 , _snake_case : Union[str, Any]=10 , _snake_case : Tuple=[10, 20, 30, 40] , _snake_case : Dict=[1, 1, 2, 1] , _snake_case : List[Any]=True , _snake_case : Dict=True , _snake_case : Union[str, Any]="relu" , _snake_case : Tuple=3 , _snake_case : Union[str, Any]=None , ): """simple docstring""" UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = image_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = embeddings_size UpperCAmelCase_ = hidden_sizes UpperCAmelCase_ = depths UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = hidden_act UpperCAmelCase_ = num_labels UpperCAmelCase_ = scope UpperCAmelCase_ = len(_snake_case) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) UpperCAmelCase_ = self.get_config() return config, pixel_values def lowerCamelCase ( self : List[Any]): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any] , _snake_case : Optional[int]): """simple docstring""" UpperCAmelCase_ = FlaxRegNetModel(config=_snake_case) UpperCAmelCase_ = model(_snake_case) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : Optional[int]): """simple docstring""" UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = FlaxRegNetForImageClassification(config=_snake_case) UpperCAmelCase_ = model(_snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs UpperCAmelCase_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () UpperCAmelCase__ : Tuple = False UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : int = False def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = FlaxRegNetModelTester(self) UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case) def lowerCamelCase ( self : List[Any]): """simple docstring""" 
self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self : List[str]): """simple docstring""" return def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case) @unittest.skip(reason='''RegNet does not use inputs_embeds''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" pass @unittest.skip(reason='''RegNet does not support input and output embeddings''') def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" pass def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(_snake_case) UpperCAmelCase_ = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _snake_case) def lowerCamelCase ( self : Optional[int]): """simple docstring""" def check_hidden_states_output(_snake_case : List[str] , _snake_case : Dict , _snake_case : List[str]): UpperCAmelCase_ = model_class(_snake_case) UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case)) UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ = self.model_tester.num_stages self.assertEqual(len(_snake_case) , expected_num_stages + 1) UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): UpperCAmelCase_ = self._prepare_for_class(_snake_case , _snake_case) UpperCAmelCase_ = model_class(_snake_case) @jax.jit def model_jitted(_snake_case : str , **_snake_case : Union[str, Any]): return model(pixel_values=_snake_case , **_snake_case) with self.subTest('''JIT Enabled'''): UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple() with self.subTest('''JIT Disabled'''): with jax.disable_jit(): UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple() self.assertEqual(len(_snake_case) , len(_snake_case)) for jitted_output, output in zip(_snake_case , _snake_case): self.assertEqual(jitted_output.shape , output.shape) def A () -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ = 
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_flax class __snake_case ( unittest.TestCase ): @cached_property def lowerCamelCase ( self : Dict): """simple docstring""" return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''') if is_vision_available() else None @slow def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''') UpperCAmelCase_ = self.default_image_processor UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = image_processor(images=_snake_case , return_tensors='''np''') UpperCAmelCase_ = model(**_snake_case) # verify the logits UpperCAmelCase_ = (1, 1000) self.assertEqual(outputs.logits.shape , _snake_case) UpperCAmelCase_ = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6]) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4))
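# Outside the test harness, the @slow integration test above reduces to this
# inference sketch (checkpoint and fixture path are taken from the test):
import jax.numpy as jnp
from PIL import Image
from transformers import AutoImageProcessor, FlaxRegNetForImageClassification

model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="np")

logits = model(**inputs).logits  # shape (1, 1000)
print(jnp.argmax(logits, axis=-1))  # predicted ImageNet class id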
7
0
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging snake_case_ : Tuple = logging.get_logger(__name__) snake_case_ : str = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class __snake_case ( a ): UpperCAmelCase__ : Any = '''blenderbot-small''' UpperCAmelCase__ : List[Any] = ['''past_key_values'''] UpperCAmelCase__ : List[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : List[str] , _snake_case : List[str]=50265 , _snake_case : Optional[Any]=512 , _snake_case : Optional[Any]=8 , _snake_case : str=2048 , _snake_case : Tuple=16 , _snake_case : int=8 , _snake_case : int=2048 , _snake_case : Dict=16 , _snake_case : Any=0.0 , _snake_case : List[Any]=0.0 , _snake_case : Tuple=True , _snake_case : Union[str, Any]=True , _snake_case : int="gelu" , _snake_case : List[str]=512 , _snake_case : int=0.1 , _snake_case : str=0.0 , _snake_case : List[str]=0.0 , _snake_case : Optional[int]=0.0_2 , _snake_case : Optional[Any]=1 , _snake_case : Dict=False , _snake_case : Tuple=0 , _snake_case : Optional[Any]=1 , _snake_case : Dict=2 , _snake_case : Union[str, Any]=2 , **_snake_case : int , ): """simple docstring""" UpperCAmelCase_ = vocab_size UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = d_model UpperCAmelCase_ = encoder_ffn_dim UpperCAmelCase_ = encoder_layers UpperCAmelCase_ = encoder_attention_heads UpperCAmelCase_ = decoder_ffn_dim UpperCAmelCase_ = decoder_layers UpperCAmelCase_ = decoder_attention_heads UpperCAmelCase_ = dropout UpperCAmelCase_ = attention_dropout UpperCAmelCase_ = activation_dropout UpperCAmelCase_ = activation_function UpperCAmelCase_ = init_std UpperCAmelCase_ = encoder_layerdrop UpperCAmelCase_ = decoder_layerdrop UpperCAmelCase_ = use_cache UpperCAmelCase_ = encoder_layers UpperCAmelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , decoder_start_token_id=_snake_case , forced_eos_token_id=_snake_case , **_snake_case , ) class __snake_case ( a ): @property def lowerCamelCase ( self : int): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase_ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ]) if self.use_past: UpperCAmelCase_ = {0: '''batch'''} UpperCAmelCase_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: UpperCAmelCase_ = {0: '''batch''', 1: '''decoder_sequence'''} UpperCAmelCase_ = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(_snake_case , direction='''inputs''') elif self.task == "causal-lm": # TODO: figure this case out. 
UpperCAmelCase_ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ]) if self.use_past: UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers for i in range(_snake_case): UpperCAmelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''} UpperCAmelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''} else: UpperCAmelCase_ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ]) return common_inputs @property def lowerCamelCase ( self : Dict): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase_ = super().outputs else: UpperCAmelCase_ = super(_snake_case , self).outputs if self.use_past: UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers for i in range(_snake_case): UpperCAmelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''} UpperCAmelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def lowerCamelCase ( self : Any , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , ): """simple docstring""" UpperCAmelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case) # Generate decoder inputs UpperCAmelCase_ = seq_length if not self.use_past else 1 UpperCAmelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case) UpperCAmelCase_ = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} UpperCAmelCase_ = dict(**_snake_case , **_snake_case) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''') else: import torch UpperCAmelCase_ , UpperCAmelCase_ = common_inputs['''input_ids'''].shape UpperCAmelCase_ = common_inputs['''decoder_input_ids'''].shape[1] UpperCAmelCase_ , UpperCAmelCase_ = self.num_attention_heads UpperCAmelCase_ = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCAmelCase_ = decoder_seq_length + 3 UpperCAmelCase_ = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) UpperCAmelCase_ = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(_snake_case , _snake_case)] , dim=1) UpperCAmelCase_ = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers UpperCAmelCase_ = min(_snake_case , _snake_case) UpperCAmelCase_ = max(_snake_case , _snake_case) - min_num_layers UpperCAmelCase_ = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(_snake_case): common_inputs["past_key_values"].append( ( torch.zeros(_snake_case), torch.zeros(_snake_case), torch.zeros(_snake_case), torch.zeros(_snake_case), )) # TODO: test this. 
UpperCAmelCase_ = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(_snake_case , _snake_case): common_inputs["past_key_values"].append((torch.zeros(_snake_case), torch.zeros(_snake_case))) return common_inputs def lowerCamelCase ( self : Optional[int] , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , ): """simple docstring""" UpperCAmelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''') else: import torch UpperCAmelCase_ , UpperCAmelCase_ = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values UpperCAmelCase_ = seqlen + 2 UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers UpperCAmelCase_ , UpperCAmelCase_ = self.num_attention_heads UpperCAmelCase_ = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCAmelCase_ = common_inputs['''attention_mask'''].dtype UpperCAmelCase_ = torch.cat( [common_inputs['''attention_mask'''], torch.ones(_snake_case , _snake_case , dtype=_snake_case)] , dim=1) UpperCAmelCase_ = [ (torch.zeros(_snake_case), torch.zeros(_snake_case)) for _ in range(_snake_case) ] return common_inputs def lowerCamelCase ( self : Any , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , ): """simple docstring""" UpperCAmelCase_ = compute_effective_axis_dimension( _snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase_ = tokenizer.num_special_tokens_to_add(_snake_case) UpperCAmelCase_ = compute_effective_axis_dimension( _snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_snake_case) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase_ = [''' '''.join([tokenizer.unk_token]) * seq_length] * batch_size UpperCAmelCase_ = dict(tokenizer(_snake_case , return_tensors=_snake_case)) return common_inputs def lowerCamelCase ( self : Dict , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case) elif self.task == "causal-lm": UpperCAmelCase_ = self._generate_dummy_inputs_for_causal_lm( _snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case) else: UpperCAmelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case) return common_inputs def lowerCamelCase ( self : Any , _snake_case : Union[str, Any] , _snake_case : Optional[int] , _snake_case : Any , _snake_case : Optional[int]): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase_ = 
super()._flatten_past_key_values_(_snake_case , _snake_case , _snake_case , _snake_case) else: UpperCAmelCase_ = super(_snake_case , self)._flatten_past_key_values_( _snake_case , _snake_case , _snake_case , _snake_case)
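# To exercise a config like this end to end, the 4.x transformers line this
# file comes from ships a generic export helper. A hedged sketch, assuming the
# class above is BlenderbotSmallOnnxConfig and that transformers.onnx.export
# keeps the (preprocessor, model, config, opset, output) signature:
from pathlib import Path

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.models.blenderbot_small.configuration_blenderbot_small import (
    BlenderbotSmallOnnxConfig,
)
from transformers.onnx import export

model_id = "facebook/blenderbot_small-90M"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

onnx_config = BlenderbotSmallOnnxConfig(model.config, task="seq2seq-lm")
onnx_inputs, onnx_outputs = export(
    tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx")
)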
369
import comet # From: unbabel-comet import torch import datasets snake_case_ : Tuple = datasets.logging.get_logger(__name__) snake_case_ : str = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n" snake_case_ : Tuple = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n" snake_case_ : Optional[int] = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. 
Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): def lowerCamelCase ( self : Any): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''sources''': datasets.Value('''string''' , id='''sequence'''), '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Value('''string''' , id='''sequence'''), }) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[ '''https://github.com/Unbabel/COMET''', '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''', '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''', ] , ) def lowerCamelCase ( self : List[Any] , _snake_case : Optional[int]): """simple docstring""" if self.config_name == "default": UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''')) else: UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name)) def lowerCamelCase ( self : List[Any] , _snake_case : str , _snake_case : List[str] , _snake_case : Tuple , _snake_case : int=None , _snake_case : Optional[Any]=False): """simple docstring""" if gpus is None: UpperCAmelCase_ = 1 if torch.cuda.is_available() else 0 UpperCAmelCase_ = {'''src''': sources, '''mt''': predictions, '''ref''': references} UpperCAmelCase_ = [dict(zip(_snake_case , _snake_case)) for t in zip(*data.values())] UpperCAmelCase_ , UpperCAmelCase_ = self.scorer.predict(_snake_case , gpus=_snake_case , progress_bar=_snake_case) return {"mean_score": mean_score, "scores": scores}
7
0
import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = 0 @slow def lowerCamelCase ( self : Any): """simple docstring""" for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case) self.assertIsNotNone(_snake_case) self.assertIsInstance(_snake_case , (BertTokenizer, BertTokenizerFast)) self.assertGreater(len(_snake_case) , 0) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case) self.assertIsNotNone(_snake_case) self.assertIsInstance(_snake_case , (GPTaTokenizer, GPTaTokenizerFast)) self.assertGreater(len(_snake_case) , 0) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case) self.assertIsInstance(_snake_case , (BertTokenizer, BertTokenizerFast)) self.assertEqual(tokenizer.vocab_size , 12) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case) self.assertIsInstance(_snake_case , (RobertaTokenizer, RobertaTokenizerFast)) self.assertEqual(tokenizer.vocab_size , 20) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) self.assertIsInstance(_snake_case , _snake_case) # Check that tokenizer_type ≠ model_type UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , config=_snake_case) self.assertIsInstance(_snake_case , (BertTokenizer, BertTokenizerFast)) self.assertEqual(tokenizer.vocab_size , 12) def lowerCamelCase ( self : str): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_snake_case , '''vocab.txt''')) UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , tokenizer_type='''bert''' , use_fast=_snake_case) self.assertIsInstance(_snake_case , _snake_case) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_snake_case , '''vocab.json''')) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_snake_case , '''merges.txt''')) UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , tokenizer_type='''gpt2''' , 
use_fast=_snake_case) self.assertIsInstance(_snake_case , _snake_case) @require_tokenizers def lowerCamelCase ( self : Tuple): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_snake_case , '''vocab.txt''')) UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , tokenizer_type='''bert''') self.assertIsInstance(_snake_case , _snake_case) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_snake_case , '''vocab.json''')) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_snake_case , '''merges.txt''')) UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , tokenizer_type='''gpt2''') self.assertIsInstance(_snake_case , _snake_case) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" with pytest.raises(_snake_case): AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''') @require_tokenizers def lowerCamelCase ( self : str): """simple docstring""" for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: UpperCAmelCase_ = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''') self.assertIsInstance(_snake_case , (BertTokenizer, BertTokenizerFast)) if isinstance(_snake_case , _snake_case): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _snake_case) else: self.assertEqual(tokenizer.do_lower_case , _snake_case) self.assertEqual(tokenizer.model_max_length , 512) @require_tokenizers def lowerCamelCase ( self : Optional[Any]): """simple docstring""" for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( _snake_case , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ): UpperCAmelCase_ = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''') def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = TOKENIZER_MAPPING.values() UpperCAmelCase_ = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(_snake_case) @require_tokenizers def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_snake_case) , _snake_case) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''') , _snake_case) @require_tokenizers def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_snake_case) UpperCAmelCase_ = '''Hello, world. 
How are you?''' UpperCAmelCase_ = tokenizer.tokenize(_snake_case) self.assertEqual('''[UNK]''' , tokens[0]) UpperCAmelCase_ = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_snake_case) UpperCAmelCase_ = tokenizer.tokenize(_snake_case) self.assertEqual('''[UNK]''' , tokens[0]) @require_tokenizers def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''') self.assertEqual(type(_snake_case) , _snake_case) self.assertEqual(tokenizer.model_max_length , 512) self.assertEqual(tokenizer.vocab_size , 30000) self.assertEqual(tokenizer.unk_token , '''[UNK]''') self.assertEqual(tokenizer.padding_side , '''right''') self.assertEqual(tokenizer.truncation_side , '''right''') def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case) self.assertIsInstance(_snake_case , (BertTokenizer, BertTokenizerFast)) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_snake_case) UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case) self.assertIsInstance(_snake_case , tokenizer.__class__) self.assertEqual(tokenizera.vocab_size , 12) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = AutoTokenizer.from_pretrained('''ctrl''') # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(_snake_case , _snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = get_tokenizer_config('''bert-base-cased''') UpperCAmelCase_ = config.pop('''_commit_hash''' , _snake_case) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(_snake_case , {'''do_lower_case''': False}) # This model does not have a tokenizer_config so we get back an empty dict. UpperCAmelCase_ = get_tokenizer_config(_snake_case) self.assertDictEqual(_snake_case , {}) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_snake_case) UpperCAmelCase_ = get_tokenizer_config(_snake_case) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''') def lowerCamelCase ( self : List[Any]): """simple docstring""" try: AutoConfig.register('''custom''' , _snake_case) AutoTokenizer.register(_snake_case , slow_tokenizer_class=_snake_case) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_snake_case): AutoTokenizer.register(_snake_case , slow_tokenizer_class=_snake_case) UpperCAmelCase_ = CustomTokenizer.from_pretrained(_snake_case) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_snake_case) UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case) self.assertIsInstance(_snake_case , _snake_case) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def lowerCamelCase ( self : str): """simple docstring""" try: AutoConfig.register('''custom''' , _snake_case) # Can register in two steps AutoTokenizer.register(_snake_case , slow_tokenizer_class=_snake_case) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None)) AutoTokenizer.register(_snake_case , fast_tokenizer_class=_snake_case) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast)) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( _snake_case , slow_tokenizer_class=_snake_case , fast_tokenizer_class=_snake_case) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast)) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_snake_case): AutoTokenizer.register(_snake_case , fast_tokenizer_class=_snake_case) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ = BertTokenizerFast.from_pretrained(_snake_case) bert_tokenizer.save_pretrained(_snake_case) UpperCAmelCase_ = CustomTokenizerFast.from_pretrained(_snake_case) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_snake_case) UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case) self.assertIsInstance(_snake_case , _snake_case) UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , use_fast=_snake_case) self.assertIsInstance(_snake_case , _snake_case) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowerCamelCase ( self : Tuple): """simple docstring""" with self.assertRaises(_snake_case): UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''') # If remote code is disabled, we can't load this config. with self.assertRaises(_snake_case): UpperCAmelCase_ = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_snake_case) UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_snake_case) self.assertTrue(tokenizer.special_attribute_present) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_snake_case) UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , trust_remote_code=_snake_case) self.assertTrue(reloaded_tokenizer.special_attribute_present) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''') self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''') # Test we can also load the slow version UpperCAmelCase_ = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_snake_case , use_fast=_snake_case) self.assertTrue(tokenizer.special_attribute_present) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''') # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_snake_case) UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , trust_remote_code=_snake_case , use_fast=_snake_case) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''') self.assertTrue(reloaded_tokenizer.special_attribute_present) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''') self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''') @require_tokenizers def lowerCamelCase ( self : Any): """simple docstring""" class __snake_case ( a ): UpperCAmelCase__ : Dict = False class __snake_case ( a ): UpperCAmelCase__ : List[str] = NewTokenizer UpperCAmelCase__ : Tuple = False try: AutoConfig.register('''custom''' , _snake_case) AutoTokenizer.register(_snake_case , slow_tokenizer_class=_snake_case) AutoTokenizer.register(_snake_case , fast_tokenizer_class=_snake_case) # If remote code is not set, the default is to use local UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''') self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''') self.assertFalse(tokenizer.special_attribute_present) UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_snake_case) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''') self.assertFalse(tokenizer.special_attribute_present) # If remote code is disabled, we load the local one. 
UpperCAmelCase_ = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_snake_case) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''') self.assertFalse(tokenizer.special_attribute_present) UpperCAmelCase_ = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_snake_case , use_fast=_snake_case) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''') self.assertFalse(tokenizer.special_attribute_present) # If remote is enabled, we load from the Hub UpperCAmelCase_ = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_snake_case) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''') self.assertTrue(tokenizer.special_attribute_present) UpperCAmelCase_ = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_snake_case , use_fast=_snake_case) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''') self.assertTrue(tokenizer.special_attribute_present) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_snake_case) self.assertTrue(tokenizer.special_attribute_present) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''') # Test we can also load the slow version UpperCAmelCase_ = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_snake_case , use_fast=_snake_case) self.assertTrue(tokenizer.special_attribute_present) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''') else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''') def lowerCamelCase ( self : int): """simple docstring""" with self.assertRaisesRegex( _snake_case , '''bert-base is not a local folder and is not a valid model identifier'''): UpperCAmelCase_ = AutoTokenizer.from_pretrained('''bert-base''') def lowerCamelCase ( self : str): """simple docstring""" with self.assertRaisesRegex( _snake_case , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''): UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , revision='''aaaaaa''') def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''') with RequestCounter() as counter: UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''') self.assertEqual(counter.get_request_count , 0) self.assertEqual(counter.head_request_count , 1) self.assertEqual(counter.other_request_count , 0)
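# --- illustrative sketch, not part of the test file above ---
# The registration flow these tests exercise pairs a custom config class with
# custom tokenizer classes. The names below (MyConfig, MyTokenizer) are
# hypothetical placeholders, not classes from the source.
from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig


class MyConfig(PretrainedConfig):
    model_type = "my-model"  # hypothetical model type


class MyTokenizer(BertTokenizer):
    pass  # reuses BertTokenizer's slow implementation for the sketch


AutoConfig.register("my-model", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
# From here, AutoTokenizer.from_pretrained on a checkpoint whose config resolves
# to MyConfig returns a MyTokenizer, which is what the tests above assert.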
370
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __snake_case ( a ): UpperCAmelCase__ : Optional[int] = (DPMSolverSinglestepScheduler,) UpperCAmelCase__ : str = (('''num_inference_steps''', 2_5),) def lowerCamelCase ( self : Dict , **_snake_case : Dict): """simple docstring""" UpperCAmelCase_ = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf'''), '''variance_type''': None, } config.update(**_snake_case) return config def lowerCamelCase ( self : Dict , _snake_case : int=0 , **_snake_case : List[Any]): """simple docstring""" UpperCAmelCase_ = dict(self.forward_default_kwargs) UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case) UpperCAmelCase_ = self.dummy_sample UpperCAmelCase_ = 0.1 * sample UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ = self.get_scheduler_config(**_snake_case) UpperCAmelCase_ = scheduler_class(**_snake_case) scheduler.set_timesteps(_snake_case) # copy over dummy past residuals UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_snake_case) UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case) new_scheduler.set_timesteps(_snake_case) # copy over dummy past residuals UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase_ , UpperCAmelCase_ = sample, sample for t in range(_snake_case , time_step + scheduler.config.solver_order + 1): UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample UpperCAmelCase_ = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase ( self : Tuple): """simple docstring""" pass def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any]=0 , **_snake_case : int): """simple docstring""" UpperCAmelCase_ = dict(self.forward_default_kwargs) UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case) UpperCAmelCase_ = self.dummy_sample UpperCAmelCase_ = 0.1 * sample UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ = self.get_scheduler_config() UpperCAmelCase_ = scheduler_class(**_snake_case) scheduler.set_timesteps(_snake_case) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_snake_case) UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case) # copy over dummy past residuals new_scheduler.set_timesteps(_snake_case) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample UpperCAmelCase_ = 
new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase ( self : Dict , _snake_case : int=None , **_snake_case : Optional[Any]): """simple docstring""" if scheduler is None: UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(**_snake_case) UpperCAmelCase_ = scheduler_class(**_snake_case) UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(**_snake_case) UpperCAmelCase_ = scheduler_class(**_snake_case) UpperCAmelCase_ = 10 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter scheduler.set_timesteps(_snake_case) for i, t in enumerate(scheduler.timesteps): UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample return sample def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) UpperCAmelCase_ = 50 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter scheduler.set_timesteps(_snake_case) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:]): UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_5_7_4) < 1e-3 def lowerCamelCase ( self : int): """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=_snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) UpperCAmelCase_ = self.full_loop(scheduler=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3 UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config) UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config) UpperCAmelCase_ = self.full_loop(scheduler=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3 def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" self.check_over_configs(thresholding=_snake_case) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , algorithm_type='''dpmsolver++''' , solver_order=_snake_case , solver_type=_snake_case , ) def lowerCamelCase ( self : Dict): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_snake_case) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , ) UpperCAmelCase_ = self.full_loop( solver_order=_snake_case , 
solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , ) assert not torch.isnan(_snake_case).any(), "Samples have nan numbers" def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" self.check_over_configs(lower_order_final=_snake_case) self.check_over_configs(lower_order_final=_snake_case) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" self.check_over_configs(lambda_min_clipped=-float('''inf''')) self.check_over_configs(lambda_min_clipped=-5.1) def lowerCamelCase ( self : int): """simple docstring""" self.check_over_configs(variance_type=_snake_case) self.check_over_configs(variance_type='''learned_range''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=_snake_case , time_step=0) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = self.full_loop() UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3 def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.2_2_4_8) < 1e-3 def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''') UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.1_4_5_3) < 1e-3 def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_snake_case) UpperCAmelCase_ = torch.mean(torch.abs(_snake_case)) assert abs(result_mean.item() - 0.0_6_4_9) < 1e-3 def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(thresholding=_snake_case , dynamic_thresholding_ratio=0) UpperCAmelCase_ = scheduler_class(**_snake_case) UpperCAmelCase_ = 10 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter.half() scheduler.set_timesteps(_snake_case) for i, t in enumerate(scheduler.timesteps): UpperCAmelCase_ = model(_snake_case , _snake_case) UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample assert sample.dtype == torch.floataa
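# --- illustrative sketch, not part of the test file above ---
# A minimal denoising loop with DPMSolverSinglestepScheduler, mirroring what
# the full-loop helper does with dummy inputs; the trained model is stubbed
# with random noise purely for illustration.
import torch
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for a model's noise prediction
    sample = scheduler.step(model_output, t, sample).prev_sample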
7
0
def solution(n: int = 4000000) -> int:
    """Returns the sum of all even Fibonacci numbers not exceeding n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
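# Quick sanity check for the solution above: the even Fibonacci numbers not
# exceeding 100 are 0, 2, 8 and 34, so the expected total is 44.
assert solution(100) == 44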
371
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) snake_case_ : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Tuple = ["DeiTFeatureExtractor"] snake_case_ : List[str] = ["DeiTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : List[Any] = [ "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "DeiTForImageClassification", "DeiTForImageClassificationWithTeacher", "DeiTForMaskedImageModeling", "DeiTModel", "DeiTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Dict = [ "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher", "TFDeiTForMaskedImageModeling", "TFDeiTModel", "TFDeiTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys snake_case_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
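# --- illustrative sketch, not part of the module above ---
# The _LazyModule pattern defers heavy imports until first attribute access.
# A simplified stand-in (assumption: transformers' real implementation also
# handles submodule maps, caching and __dir__):
import importlib
from types import ModuleType


class LazyProxy(ModuleType):
    def __init__(self, name: str, target: str):
        super().__init__(name)
        self._target = target
        self._module = None

    def __getattr__(self, attr: str):
        if self._module is None:
            # the real import only happens here, on first attribute access
            self._module = importlib.import_module(self._target)
        return getattr(self._module, attr)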
7
0
import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() snake_case_ : Any = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) snake_case_ : str = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append( ( f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias")) # 
convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.encoder.norm.weight", "encoder.layernorm.weight"), ("transformer.encoder.norm.bias", "encoder.layernorm.bias"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) def A (__A : Optional[Any] , __A : Tuple , __A : List[str] ) -> List[str]: """simple docstring""" UpperCAmelCase_ = state_dict.pop(__A ) UpperCAmelCase_ = val def A (__A : Optional[Any] ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCAmelCase_ = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) UpperCAmelCase_ = value else: UpperCAmelCase_ = value return new_state_dict def A (__A : Union[str, Any] ) -> Dict: """simple docstring""" UpperCAmelCase_ = '''''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCAmelCase_ = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCAmelCase_ = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[:256, :] UpperCAmelCase_ = in_proj_bias[:256] UpperCAmelCase_ = in_proj_weight[256:512, :] UpperCAmelCase_ = in_proj_bias[256:512] UpperCAmelCase_ = in_proj_weight[-256:, :] UpperCAmelCase_ = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention UpperCAmelCase_ = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCAmelCase_ = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[:256, :] UpperCAmelCase_ = in_proj_bias[:256] UpperCAmelCase_ = in_proj_weight[256:512, :] UpperCAmelCase_ = in_proj_bias[256:512] UpperCAmelCase_ = in_proj_weight[-256:, :] UpperCAmelCase_ = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention UpperCAmelCase_ = state_dict.pop( F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) UpperCAmelCase_ = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of cross-attention to the state dict UpperCAmelCase_ = in_proj_weight_cross_attn[:256, :] UpperCAmelCase_ = in_proj_bias_cross_attn[:256] UpperCAmelCase_ = 
in_proj_weight_cross_attn[256:512, :] UpperCAmelCase_ = in_proj_bias_cross_attn[256:512] UpperCAmelCase_ = in_proj_weight_cross_attn[-256:, :] UpperCAmelCase_ = in_proj_bias_cross_attn[-256:] def A (__A : Optional[int] , __A : int ) -> List[str]: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = image.size UpperCAmelCase_ = max(__A , __A ) UpperCAmelCase_ = 800 if '''detection''' in checkpoint_url else 1000 UpperCAmelCase_ = target_max_size / current_max_size UpperCAmelCase_ = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def A (__A : Tuple ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ = F.to_tensor(__A ) UpperCAmelCase_ = F.normalize(__A , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def A (__A : List[Any] , __A : Tuple , __A : str ) -> Optional[Any]: """simple docstring""" logger.info('''Converting model...''' ) # load original state dict UpperCAmelCase_ = torch.hub.load_state_dict_from_url(__A , map_location='''cpu''' ) # rename keys for src, dest in rename_keys: rename_key(__A , __A , __A ) UpperCAmelCase_ = rename_backbone_keys(__A ) # query, key and value matrices need special treatment read_in_q_k_v(__A ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCAmelCase_ = '''model.''' for key in state_dict.copy().keys(): if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): UpperCAmelCase_ = state_dict.pop(__A ) UpperCAmelCase_ = val # create HuggingFace model and load state dict UpperCAmelCase_ = TableTransformerConfig( backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: UpperCAmelCase_ = 15 UpperCAmelCase_ = 2 UpperCAmelCase_ = {0: '''table''', 1: '''table rotated'''} UpperCAmelCase_ = idalabel UpperCAmelCase_ = {v: k for k, v in idalabel.items()} else: UpperCAmelCase_ = 125 UpperCAmelCase_ = 6 UpperCAmelCase_ = { 0: '''table''', 1: '''table column''', 2: '''table row''', 3: '''table column header''', 4: '''table projected row header''', 5: '''table spanning cell''', } UpperCAmelCase_ = idalabel UpperCAmelCase_ = {v: k for k, v in idalabel.items()} UpperCAmelCase_ = DetrImageProcessor( format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1000 ) UpperCAmelCase_ = TableTransformerForObjectDetection(__A ) model.load_state_dict(__A ) model.eval() # verify our conversion UpperCAmelCase_ = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png''' UpperCAmelCase_ = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=__A ) UpperCAmelCase_ = Image.open(__A ).convert('''RGB''' ) UpperCAmelCase_ = normalize(resize(__A , __A ) ).unsqueeze(0 ) UpperCAmelCase_ = model(__A ) if "detection" in checkpoint_url: UpperCAmelCase_ = (1, 15, 3) UpperCAmelCase_ = torch.tensor( [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] ) UpperCAmelCase_ = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] ) else: UpperCAmelCase_ = (1, 125, 7) UpperCAmelCase_ = torch.tensor( [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] ) UpperCAmelCase_ = 
torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , __A , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __A , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(__A ).mkdir(exist_ok=__A ) model.save_pretrained(__A ) image_processor.save_pretrained(__A ) if push_to_hub: # Push model to HF hub logger.info('''Pushing model to the hub...''' ) UpperCAmelCase_ = ( '''microsoft/table-transformer-detection''' if '''detection''' in checkpoint_url else '''microsoft/table-transformer-structure-recognition''' ) model.push_to_hub(__A ) image_processor.push_to_hub(__A ) if __name__ == "__main__": snake_case_ : Optional[Any] = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", type=str, choices=[ "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth", ], help="URL of the Table Transformer checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) snake_case_ : Any = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
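# Hedged usage note for the conversion script above (the file name is an
# assumption; the flags match the argparse definitions in this file):
#   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection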
350
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets snake_case_ : Dict = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n" snake_case_ : List[str] = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n" snake_case_ : List[Any] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... 
[\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... 
case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): def lowerCamelCase ( self : Tuple): """simple docstring""" if version.parse(scb.__version__) < version.parse('''1.4.12'''): raise ImportWarning( '''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n''' '''You can install it with `pip install "sacrebleu>=1.4.12"`.''') return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''') , id='''references'''), }) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[ '''https://github.com/jhclark/tercom''', ] , ) def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , ): """simple docstring""" UpperCAmelCase_ = len(references[0]) if any(len(_snake_case) != references_per_prediction for refs in references): raise ValueError('''Sacrebleu requires the same number of references for each prediction''') UpperCAmelCase_ = [[refs[i] for refs in references] for i in range(_snake_case)] UpperCAmelCase_ = TER( normalized=_snake_case , no_punct=_snake_case , asian_support=_snake_case , case_sensitive=_snake_case , ) UpperCAmelCase_ = sb_ter.corpus_score(_snake_case , _snake_case) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
7
0
import math


def prime_sieve(n: int) -> list:
    """Returns a list of all primes below n, using an odd-only sieve."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999966663333) -> int:
    """Returns the sum of all semidivisible numbers not exceeding the limit."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
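# Sanity check for the sieve above: primes below 20.
assert prime_sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]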
351
import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class __snake_case ( unittest.TestCase , a ): def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = load_tool('''text-to-speech''') self.tool.setup() def lowerCamelCase ( self : int): """simple docstring""" torch.manual_seed(0) UpperCAmelCase_ = self.tool('''hey''') UpperCAmelCase_ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , )) def lowerCamelCase ( self : Any): """simple docstring""" torch.manual_seed(0) UpperCAmelCase_ = self.tool('''hey''') UpperCAmelCase_ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
7
0
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """Marks the function with a single key code so KeyHandler can dispatch to it."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Marks the function with several key codes so KeyHandler can dispatch to it."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Reads one character and returns the matching handler's result, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
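# --- hedged usage sketch, appended for illustration; not from the source ---
# A class wrapped with `register` gets its @mark-decorated methods dispatched
# by handle_input. A raw key code is used here to avoid assuming KEYMAP names.
@register
class DemoMenu:
    current_selection = None

    @mark(ord("q"))  # dispatch on the "q" key code
    def quit(cls):
        # handle_input calls this with the class when "q" is read
        return "quit"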
352
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) else: from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel, UTransformeraDModel from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
7
0
"""simple docstring""" from maths.prime_factors import prime_factors def A (__A : int ) -> int: """simple docstring""" if not isinstance(__A , __A ): UpperCAmelCase_ = F"""Input value of [number={number}] must be an integer""" raise TypeError(__A ) if number < 1: raise ValueError('''Input must be a positive integer''' ) return -1 if len(prime_factors(__A ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
353
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __snake_case : @staticmethod def lowerCamelCase ( *_snake_case : List[str] , **_snake_case : str): """simple docstring""" pass @is_pipeline_test @require_torch @require_vision class __snake_case ( unittest.TestCase ): UpperCAmelCase__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def lowerCamelCase ( self : Any , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''') UpperCAmelCase_ = [ { '''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''), '''question''': '''How many cats are there?''', }, { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''question''': '''How many cats are there?''', }, ] return vqa_pipeline, examples def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , _snake_case : Optional[int]): """simple docstring""" UpperCAmelCase_ = vqa_pipeline(_snake_case , top_k=1) self.assertEqual( _snake_case , [ [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}], [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}], ] , ) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''') UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' UpperCAmelCase_ = '''How many cats are there?''' UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2) self.assertEqual( _snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}]) UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2) self.assertEqual( _snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}]) @slow @require_torch def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''') UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' UpperCAmelCase_ = '''How many cats are there?''' UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2) self.assertEqual( nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]) UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2) self.assertEqual( nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]) UpperCAmelCase_ = vqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2) self.assertEqual( nested_simplify(_snake_case , decimals=4) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , ) @require_tf 
@unittest.skip('''Visual question answering not implemented in TF''') def lowerCamelCase ( self : Tuple): """simple docstring""" pass
7
0
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Returns a list of n together with all its left and right truncations."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Cheap rejection test: the leading and trailing three digits must be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Returns the first `count` two-sided (truncatable) primes."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Returns the sum of the eleven truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
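# Sanity check: the two smallest two-sided truncatable primes are 23 and 37.
assert compute_truncated_primes(2) == [23, 37]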
354
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Counts set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Counts set bits by checking the parity of each bit in turn."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmarks the two counting strategies on several input values."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup
        )
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
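# Quick check: 25 is 0b11001, so both counting strategies above should see
# three set bits.
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3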
7
0
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer snake_case_ : Optional[int] = logging.get_logger(__name__) snake_case_ : List[str] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} snake_case_ : Dict = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } snake_case_ : Any = { "allenai/led-base-16384": 16384, } class __snake_case ( a ): UpperCAmelCase__ : List[str] = VOCAB_FILES_NAMES UpperCAmelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : Any = LEDTokenizer UpperCAmelCase__ : str = ['''input_ids''', '''attention_mask'''] def __init__( self : str , _snake_case : Optional[int]=None , _snake_case : int=None , _snake_case : str=None , _snake_case : Optional[Any]="replace" , _snake_case : Dict="<s>" , _snake_case : Optional[Any]="</s>" , _snake_case : Dict="</s>" , _snake_case : List[str]="<s>" , _snake_case : Union[str, Any]="<unk>" , _snake_case : Dict="<pad>" , _snake_case : Tuple="<mask>" , _snake_case : str=False , _snake_case : List[Any]=True , **_snake_case : Tuple , ): """simple docstring""" super().__init__( _snake_case , _snake_case , tokenizer_file=_snake_case , errors=_snake_case , bos_token=_snake_case , eos_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case , **_snake_case , ) UpperCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('''add_prefix_space''' , _snake_case) != add_prefix_space: UpperCAmelCase_ = getattr(_snake_case , pre_tok_state.pop('''type''')) UpperCAmelCase_ = add_prefix_space UpperCAmelCase_ = pre_tok_class(**_snake_case) UpperCAmelCase_ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` UpperCAmelCase_ = '''post_processor''' UpperCAmelCase_ = getattr(self.backend_tokenizer , _snake_case , _snake_case) if tokenizer_component_instance: UpperCAmelCase_ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCAmelCase_ = tuple(state['''sep''']) if "cls" in state: UpperCAmelCase_ = tuple(state['''cls''']) UpperCAmelCase_ = False if state.get('''add_prefix_space''' , _snake_case) != add_prefix_space: UpperCAmelCase_ = add_prefix_space UpperCAmelCase_ = True if state.get('''trim_offsets''' , _snake_case) != trim_offsets: UpperCAmelCase_ = trim_offsets UpperCAmelCase_ = True if changes_to_apply: UpperCAmelCase_ = getattr(_snake_case , state.pop('''type''')) UpperCAmelCase_ = component_class(**_snake_case) setattr(self.backend_tokenizer , _snake_case , _snake_case) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def 
lowerCamelCase ( self : str): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''') return None return str(self._mask_token) @mask_token.setter def lowerCamelCase ( self : Dict , _snake_case : List[Any]): """simple docstring""" UpperCAmelCase_ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case) if isinstance(_snake_case , _snake_case) else value UpperCAmelCase_ = value def lowerCamelCase ( self : Union[str, Any] , *_snake_case : str , **_snake_case : int): """simple docstring""" UpperCAmelCase_ = kwargs.get('''is_split_into_words''' , _snake_case) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''') return super()._batch_encode_plus(*_snake_case , **_snake_case) def lowerCamelCase ( self : List[Any] , *_snake_case : int , **_snake_case : Optional[int]): """simple docstring""" UpperCAmelCase_ = kwargs.get('''is_split_into_words''' , _snake_case) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''') return super()._encode_plus(*_snake_case , **_snake_case) def lowerCamelCase ( self : Dict , _snake_case : str , _snake_case : Optional[str] = None): """simple docstring""" UpperCAmelCase_ = self._tokenizer.model.save(_snake_case , name=_snake_case) return tuple(_snake_case) def lowerCamelCase ( self : List[str] , _snake_case : List[str] , _snake_case : List[str]=None): """simple docstring""" UpperCAmelCase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowerCamelCase ( self : str , _snake_case : List[int] , _snake_case : Optional[List[int]] = None): """simple docstring""" UpperCAmelCase_ = [self.sep_token_id] UpperCAmelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def lowerCamelCase ( self : Dict , _snake_case : Union[Dict[str, EncodedInput], BatchEncoding] , _snake_case : Optional[int] = None , _snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , ): """simple docstring""" UpperCAmelCase_ = super()._pad( encoded_inputs=_snake_case , max_length=_snake_case , padding_strategy=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=_snake_case , ) # Load from model defaults if return_attention_mask is None: UpperCAmelCase_ = '''attention_mask''' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCAmelCase_ = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
            required_input = encoded_inputs[self.model_input_names[0]]
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
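# --- Hedged usage sketch (illustration only, not part of the file above) ---
# In transformers this class is registered as `LEDTokenizerFast`; the `_pad` override
# pads a user-supplied `global_attention_mask` with -1 so its length tracks `input_ids`.
# The checkpoint name comes from the pretrained-vocab map above.
from transformers import LEDTokenizerFast

tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = tok(["a short input", "a noticeably longer second input"])
# put global attention on the first token of each sequence (a task-specific choice)
enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
padded = tok.pad(enc, padding="longest")  # global_attention_mask is padded with -1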
355
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if it is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is exactly the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is longer than the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story with no `@highlight` markers yields an empty summary."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five.\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story yields empty story and summary lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
7
0
from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract snake_case_ : Tuple = logging.get_logger(__name__) def A (__A : Any , __A : List[str] , __A : Any ) -> List[Any]: """simple docstring""" return [ int(1000 * (box[0] / width) ), int(1000 * (box[1] / height) ), int(1000 * (box[2] / width) ), int(1000 * (box[3] / height) ), ] def A (__A : np.ndarray , __A : Optional[str] , __A : Optional[str] ) -> str: """simple docstring""" UpperCAmelCase_ = to_pil_image(__A ) UpperCAmelCase_ , UpperCAmelCase_ = pil_image.size UpperCAmelCase_ = pytesseract.image_to_data(__A , lang=__A , output_type='''dict''' , config=__A ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates UpperCAmelCase_ = [idx for idx, word in enumerate(__A ) if not word.strip()] UpperCAmelCase_ = [word for idx, word in enumerate(__A ) if idx not in irrelevant_indices] UpperCAmelCase_ = [coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices] UpperCAmelCase_ = [coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices] UpperCAmelCase_ = [coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices] UpperCAmelCase_ = [coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format UpperCAmelCase_ = [] for x, y, w, h in zip(__A , __A , __A , __A ): UpperCAmelCase_ = [x, y, x + w, y + h] actual_boxes.append(__A ) # finally, normalize the bounding boxes UpperCAmelCase_ = [] for box in actual_boxes: normalized_boxes.append(normalize_box(__A , __A , __A ) ) assert len(__A ) == len(__A ), "Not as many words as there are bounding boxes" return words, normalized_boxes class __snake_case ( a ): UpperCAmelCase__ : Tuple = ['''pixel_values'''] def __init__( self : Dict , _snake_case : bool = True , _snake_case : Dict[str, int] = None , _snake_case : PILImageResampling = PILImageResampling.BILINEAR , _snake_case : bool = True , _snake_case : float = 1 / 255 , _snake_case : bool = True , _snake_case : Union[float, Iterable[float]] = None , _snake_case : Union[float, Iterable[float]] = None , _snake_case : bool = True , _snake_case : Optional[str] = None , _snake_case : Optional[str] = "" , **_snake_case : Optional[int] , ): """simple docstring""" super().__init__(**_snake_case) UpperCAmelCase_ = size if size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase_ = get_size_dict(_snake_case) UpperCAmelCase_ = do_resize UpperCAmelCase_ = size UpperCAmelCase_ = resample UpperCAmelCase_ = do_rescale UpperCAmelCase_ = rescale_value UpperCAmelCase_ = do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD UpperCAmelCase_ = apply_ocr 
UpperCAmelCase_ = ocr_lang UpperCAmelCase_ = tesseract_config def lowerCamelCase ( self : str , _snake_case : np.ndarray , _snake_case : Dict[str, int] , _snake_case : PILImageResampling = PILImageResampling.BILINEAR , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Union[str, Any] , ): """simple docstring""" UpperCAmelCase_ = get_size_dict(_snake_case) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""") UpperCAmelCase_ = (size['''height'''], size['''width''']) return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case) def lowerCamelCase ( self : List[Any] , _snake_case : np.ndarray , _snake_case : Union[int, float] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Dict , ): """simple docstring""" return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case) def lowerCamelCase ( self : Union[str, Any] , _snake_case : np.ndarray , _snake_case : Union[float, Iterable[float]] , _snake_case : Union[float, Iterable[float]] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : str , ): """simple docstring""" return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case) def lowerCamelCase ( self : Optional[int] , _snake_case : ImageInput , _snake_case : bool = None , _snake_case : Dict[str, int] = None , _snake_case : Optional[int]=None , _snake_case : bool = None , _snake_case : float = None , _snake_case : bool = None , _snake_case : Union[float, Iterable[float]] = None , _snake_case : Union[float, Iterable[float]] = None , _snake_case : bool = None , _snake_case : Optional[str] = None , _snake_case : Optional[str] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : ChannelDimension = ChannelDimension.FIRST , **_snake_case : List[str] , ): """simple docstring""" UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ = size if size is not None else self.size UpperCAmelCase_ = get_size_dict(_snake_case) UpperCAmelCase_ = resample if resample is not None else self.resample UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ = image_std if image_std is not None else self.image_std UpperCAmelCase_ = apply_ocr if apply_ocr is not None else self.apply_ocr UpperCAmelCase_ = ocr_lang if ocr_lang is not None else self.ocr_lang UpperCAmelCase_ = tesseract_config if tesseract_config is not None else self.tesseract_config UpperCAmelCase_ = make_list_of_images(_snake_case) if not valid_images(_snake_case): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') if do_normalize and (image_mean is None or image_std is None): raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''') # All transformations expect numpy arrays. 
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
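# --- Hedged usage sketch ---
# The file layout matches transformers' `LayoutLMv3ImageProcessor` (IMAGENET mean/std,
# 224x224 default size, optional Tesseract OCR); treat that class name as an inference
# from the contents above, and the input file name as a placeholder.
from PIL import Image
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=True)  # the OCR path needs pytesseract installed
image = Image.open("document.png").convert("RGB")     # hypothetical input file
encoding = processor(image, return_tensors="pt")
print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 224, 224]) with the defaults
print(encoding["words"], encoding["boxes"])  # OCR words and 0-1000 normalized boxes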
356
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): snake_case_ : Any = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right snake_case_ : Optional[Any] = 128022 snake_case_ : Optional[int] = 128028 @require_sentencepiece class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : List[str] = MaMaaaTokenizer UpperCAmelCase__ : int = False UpperCAmelCase__ : Dict = False UpperCAmelCase__ : List[str] = True def lowerCamelCase ( self : str): """simple docstring""" super().setUp() UpperCAmelCase_ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case)))) UpperCAmelCase_ = Path(self.tmpdirname) save_json(_snake_case , save_dir / VOCAB_FILES_NAMES['''vocab_file''']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES['''spm_file''']) UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def lowerCamelCase ( self : str , **_snake_case : Union[str, Any]): """simple docstring""" return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_snake_case) def lowerCamelCase ( self : Optional[int] , _snake_case : List[str]): """simple docstring""" return ( "This is a test", "This is a test", ) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = '''</s>''' UpperCAmelCase_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = list(tokenizer.get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''</s>''') self.assertEqual(vocab_keys[1] , '''<unk>''') self.assertEqual(vocab_keys[-1] , '''<s>''') self.assertEqual(len(_snake_case) , tokenizer.vocab_size + len(tokenizer.get_added_vocab())) @unittest.skip('''Skip this test while all models are still to be uploaded.''') def lowerCamelCase ( self : Optional[int]): """simple docstring""" pass def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = tokenizer.tokenize('''This is a test''') self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual( tokenizer.convert_tokens_to_ids(_snake_case) , [2, 3, 4, 5, 6] , ) UpperCAmelCase_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6]) self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case) self.assertEqual(_snake_case , '''This is a test''') @slow def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 
207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_snake_case , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , ) @require_torch @require_sentencepiece @require_tokenizers class __snake_case ( unittest.TestCase ): UpperCAmelCase__ : Dict = '''facebook/m2m100_418M''' UpperCAmelCase__ : Dict = [ '''In my opinion, there are two levels of response from the French government.''', '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''', ] UpperCAmelCase__ : Dict = [ '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', ] # fmt: off UpperCAmelCase__ : Any = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2] @classmethod def lowerCamelCase ( cls : Optional[Any]): """simple docstring""" UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''') UpperCAmelCase_ = 1 return cls def lowerCamelCase ( self : List[Any]): 
"""simple docstring""" self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006) self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022) self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076) self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.tokenizer.get_vocab() self.assertEqual(len(_snake_case) , self.tokenizer.vocab_size) self.assertEqual(vocab['''<unk>'''] , 3) self.assertIn(self.tokenizer.get_lang_token('''en''') , _snake_case) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''en''' UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , _snake_case) def lowerCamelCase ( self : Any): """simple docstring""" self.assertIn(_snake_case , self.tokenizer.all_special_ids) # fmt: off UpperCAmelCase_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: on UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case) UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case) self.assertEqual(_snake_case , _snake_case) self.assertNotIn(self.tokenizer.eos_token , _snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(_snake_case) UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(_snake_case) self.assertDictEqual(new_tok.lang_token_to_id , _snake_case) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = '''en''' UpperCAmelCase_ = '''fr''' UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''') UpperCAmelCase_ = shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id) for k in batch: UpperCAmelCase_ = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''mr''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) UpperCAmelCase_ = '''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) @require_torch def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) UpperCAmelCase_ = '''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')]) 
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''') self.assertEqual( nested_simplify(_snake_case) , { # en_XX, A, test, EOS '''input_ids''': [[128022, 58, 4183, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 128006, } , )
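# --- Hedged translation sketch, grounded in the checkpoint exercised above ---
# `facebook/m2m100_418M` is the model the integration test loads; forcing the target
# language id as the BOS token is the documented M2M100 generation pattern.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
batch = tokenizer("In my opinion, there are two levels of response.", return_tensors="pt")
generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))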
7
0
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
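# --- Usage sketch grounded in the tests above ---
# With the same toy vocab, Moses-style pretokenization plus lower-casing behaves as asserted:
from transformers import TransfoXLTokenizer

tokenizer = TransfoXLTokenizer(vocab_file="vocab.txt", lower_case=True)  # path is illustrative
print(tokenizer.tokenize("<unk> UNwanted , running"))  # ['<unk>', 'unwanted', ',', 'running']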
357
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Supports batched inputs: dicts, lists of dicts, generators, datasets
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
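# --- Hedged usage sketch of the pipeline above ---
# "visual-question-answering" is the registered task name; the checkpoint below is a
# common VQA model and only an illustrative choice, as is the image path.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
answers = vqa(image="photo.jpg", question="What is on the table?", top_k=3)
print(answers)  # [{"score": ..., "answer": ...}, ...]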
7
0
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any]): """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']): UpperCAmelCase_ = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_snake_case) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''') def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) # set architectures equal to `None` UpperCAmelCase_ = None UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Dict): """simple 
docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tinier_bart''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tinier_bart''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ = PyTorchBenchmarkArguments( 
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) benchmark.run() self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists()) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_snake_case : Tuple): self.assertTrue(hasattr(_snake_case , '''sequential''')) self.assertTrue(hasattr(_snake_case , '''cumulative''')) self.assertTrue(hasattr(_snake_case , '''current''')) self.assertTrue(hasattr(_snake_case , '''total''')) with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() _check_summary_is_not_empty(result.inference_summary) _check_summary_is_not_empty(result.train_summary) self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
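# --- Usage sketch distilled from the tests above ---
# The same arguments the tests pass can be used directly to time a model:
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()
print(results.time_inference_result)
print(results.memory_inference_result)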
358
import sys


def matrix_chain_order(array):
    """Dynamic-programming solution to the matrix-chain multiplication problem.

    `array` holds the chain's dimensions: matrix A_i has size array[i - 1] x array[i].
    Returns the cost table `matrix` and the split table `sol`.
    """
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Recursively print the optimal parenthesization encoded in the split table."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
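# --- Worked example ---
# For array = [30, 35, 15, 5, 10, 20, 25] (the classic CLRS instance: six matrices,
# A_i of size array[i-1] x array[i]) the minimum cost is 15125 scalar multiplications,
# and the printed parenthesization is ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).
m, s = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
assert m[1][6] == 15125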
7
0
"""simple docstring""" from __future__ import annotations def A (__A : list[list[int]] ) -> bool: """simple docstring""" UpperCAmelCase_ = len(__A ) # We need to create solution object to save path. UpperCAmelCase_ = [[0 for _ in range(__A )] for _ in range(__A )] UpperCAmelCase_ = run_maze(__A , 0 , 0 , __A ) if solved: print('''\n'''.join(str(__A ) for row in solutions ) ) else: print('''No solution exists!''' ) return solved def A (__A : list[list[int]] , __A : int , __A : int , __A : list[list[int]] ) -> bool: """simple docstring""" UpperCAmelCase_ = len(__A ) # Final check point. if i == j == (size - 1): UpperCAmelCase_ = 1 return True UpperCAmelCase_ = (not i < 0) and (not j < 0) # Check lower bounds UpperCAmelCase_ = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. UpperCAmelCase_ = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited UpperCAmelCase_ = 1 # check for directions if ( run_maze(__A , i + 1 , __A , __A ) or run_maze(__A , __A , j + 1 , __A ) or run_maze(__A , i - 1 , __A , __A ) or run_maze(__A , __A , j - 1 , __A ) ): return True UpperCAmelCase_ = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
359
import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py snake_case_ : int = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. snake_case_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS) snake_case_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING snake_case_ : Union[str, Any] = { # used to compute the property `self.chunk_length` "EncodecConfig": ["overlap"], # used as `self.bert_model = BertModel(config, ...)` "DPRConfig": True, # not used in modeling files, but it's an important information "FSMTConfig": ["langs"], # used internally in the configuration class file "GPTNeoConfig": ["attention_types"], # used internally in the configuration class file "EsmConfig": ["is_folding_model"], # used during training (despite we don't have training script for these models yet) "Mask2FormerConfig": ["ignore_value"], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) "OneFormerConfig": ["ignore_value", "norm"], # used during preprocessing and collation, see `collating_graphormer.py` "GraphormerConfig": ["spatial_pos_max"], # used internally in the configuration class file "T5Config": ["feed_forward_proj"], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally "MT5Config": ["feed_forward_proj", "tokenizer_class"], "UMT5Config": ["feed_forward_proj", "tokenizer_class"], # used internally in the configuration class file "LongT5Config": ["feed_forward_proj"], # used internally in the configuration class file "SwitchTransformersConfig": ["feed_forward_proj"], # having default values other than `1e-5` - we can't fix them without breaking "BioGptConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "GLPNConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "SegformerConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "CvtConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "PerceiverConfig": ["layer_norm_eps"], # used internally to calculate the feature size "InformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate the feature size "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate the feature size "AutoformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate `mlp_dim` "SamVisionConfig": ["mlp_ratio"], # For (head) training, but so far not implemented "ClapAudioConfig": ["num_classes"], # Not used, but providing useful information to users "SpeechT5HifiGanConfig": ["sampling_rate"], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { "CLIPSegConfig": True, "DeformableDetrConfig": True, "DetaConfig": True, "DinatConfig": True, "DonutSwinConfig": True, "EfficientFormerConfig": True, "FSMTConfig": True, "JukeboxConfig": True, 
"LayoutLMv2Config": True, "MaskFormerSwinConfig": True, "MT5Config": True, "NatConfig": True, "OneFormerConfig": True, "PerceiverConfig": True, "RagConfig": True, "SpeechT5Config": True, "SwinConfig": True, "Swin2SRConfig": True, "Swinv2Config": True, "SwitchTransformersConfig": True, "TableTransformerConfig": True, "TapasConfig": True, "TransfoXLConfig": True, "UniSpeechConfig": True, "UniSpeechSatConfig": True, "WavLMConfig": True, "WhisperConfig": True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) "JukeboxPriorConfig": True, # TODO: @Younes (for `is_decoder`) "Pix2StructTextConfig": True, } ) def A (__A : List[Any] , __A : Optional[int] , __A : str , __A : Dict ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( F"""config.{attribute}""" in modeling_source or F"""getattr(config, \"{attribute}\"""" in modeling_source or F"""getattr(self.config, \"{attribute}\"""" in modeling_source ): UpperCAmelCase_ = True # Deal with multi-line cases elif ( re.search( RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __A , ) is not None ): UpperCAmelCase_ = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: UpperCAmelCase_ = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files UpperCAmelCase_ = [ '''bos_index''', '''eos_index''', '''pad_index''', '''unk_index''', '''mask_index''', '''image_size''', '''use_cache''', '''out_features''', '''out_indices''', ] UpperCAmelCase_ = ['''encoder_no_repeat_ngram_size'''] # Special cases to be allowed UpperCAmelCase_ = True if not attribute_used: UpperCAmelCase_ = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: UpperCAmelCase_ = True elif attribute in ["tie_word_embeddings"] and default_value is False: UpperCAmelCase_ = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: UpperCAmelCase_ = True elif attribute.endswith('''_token_id''' ): UpperCAmelCase_ = True # configuration class specific cases if not case_allowed: UpperCAmelCase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) UpperCAmelCase_ = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def A (__A : Tuple ) -> List[Any]: """simple docstring""" UpperCAmelCase_ = dict(inspect.signature(config_class.__init__ ).parameters ) UpperCAmelCase_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']] UpperCAmelCase_ = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass UpperCAmelCase_ = {} if len(config_class.attribute_map ) > 0: UpperCAmelCase_ = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files UpperCAmelCase_ = inspect.getsourcefile(__A ) 
UpperCAmelCase_ = os.path.dirname(__A ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. UpperCAmelCase_ = [os.path.join(__A , __A ) for fn in os.listdir(__A ) if fn.startswith('''modeling_''' )] # Get the source code strings UpperCAmelCase_ = [] for path in modeling_paths: if os.path.isfile(__A ): with open(__A ) as fp: modeling_sources.append(fp.read() ) UpperCAmelCase_ = [] for config_param, default_value in zip(__A , __A ): # `attributes` here is all the variant names for `config_param` UpperCAmelCase_ = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(__A , __A , __A , __A ): unused_attributes.append(attributes[0] ) return sorted(__A ) def A () -> Any: """simple docstring""" UpperCAmelCase_ = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) UpperCAmelCase_ = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda __A : inspect.isclass(__A ) and issubclass(__A , __A ) and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: UpperCAmelCase_ = check_config_attributes_being_used(__A ) if len(__A ) > 0: UpperCAmelCase_ = unused_attributes if len(__A ) > 0: UpperCAmelCase_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n''' for name, attributes in configs_with_unused_attributes.items(): error += F"""{name}: {attributes}\n""" raise ValueError(__A ) if __name__ == "__main__": check_config_attributes()
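# --- Illustration of the multi-line `getattr` detection above ---
# The raw f-string pattern matches `getattr(config, "attr", ...)` even when the call is
# split across lines; `attribute` is instantiated here with a sample name.
import re

attribute = "hidden_size"
pattern = rf"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\""
source = 'x = getattr(\n    self.config,\n    "hidden_size",\n    64,\n)'
assert re.search(pattern, source) is not None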
7
0
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool the token embeddings, weighting each position by the attention mask
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
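# --- Hedged usage sketch ---
# The class names above follow the upstream M-CLIP project; the tokenizer checkpoint and
# dimension choices here are illustrative. `transformerDimSize` must equal the text
# encoder's hidden size (768 for the base XLM-R configuration used below), and the
# vocab size must cover the tokenizer's ids.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("xlm-roberta-base")
config = MCLIPConfig(transformerDimSize=768, imageDimSize=640, vocab_size=tok.vocab_size)
model = MultilingualCLIP(config)
batch = tok(["une photo d'un chat"], padding=True, return_tensors="pt")
text_features, token_embeddings = model(batch["input_ids"], batch["attention_mask"])
print(text_features.shape)  # torch.Size([1, 640])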
360
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
7
0
def optimal_merge_pattern(files: list) -> float:
    """Greedy two-way merge: repeatedly merge the two smallest files.

    Returns the total (optimal) cost of merging all files into one, where
    merging two files costs the sum of their sizes.
    """
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
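# --- Worked example ---
# files = [2, 3, 4]: merge 2+3 -> 5 (cost 5), then 5+4 -> 9 (cost 9); total 14.
assert optimal_merge_pattern([2, 3, 4]) == 14
assert optimal_merge_pattern([5, 10, 20, 30, 30]) == 205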
361
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 snake_case_ : List[str] = { "return_dict": False, "output_hidden_states": True, "output_attentions": True, "torchscript": True, "torch_dtype": "float16", "use_bfloat16": True, "tf_legacy_loss": True, "pruned_heads": {"a": 1}, "tie_word_embeddings": False, "is_decoder": True, "cross_attention_hidden_size": 128, "add_cross_attention": True, "tie_encoder_decoder": True, "max_length": 50, "min_length": 3, "do_sample": True, "early_stopping": True, "num_beams": 3, "num_beam_groups": 3, "diversity_penalty": 0.5, "temperature": 2.0, "top_k": 10, "top_p": 0.7, "typical_p": 0.2, "repetition_penalty": 0.8, "length_penalty": 0.8, "no_repeat_ngram_size": 5, "encoder_no_repeat_ngram_size": 5, "bad_words_ids": [1, 2, 3], "num_return_sequences": 3, "chunk_size_feed_forward": 5, "output_scores": True, "return_dict_in_generate": True, "forced_bos_token_id": 2, "forced_eos_token_id": 3, "remove_invalid_values": True, "architectures": ["BertModel"], "finetuning_task": "translation", "id2label": {0: "label"}, "label2id": {"label": "0"}, "tokenizer_class": "BertTokenizerFast", "prefix": "prefix", "bos_token_id": 6, "pad_token_id": 7, "eos_token_id": 8, "sep_token_id": 9, "decoder_start_token_id": 10, "exponential_decay_length_penalty": (5, 1.01), "suppress_tokens": [0, 1], "begin_suppress_tokens": 2, "task_specific_params": {"translation": "some_params"}, "problem_type": "regression", } @is_staging_test class __snake_case ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls : Optional[Any]): """simple docstring""" UpperCAmelCase_ = TOKEN HfFolder.save_token(_snake_case) @classmethod def lowerCamelCase ( cls : List[str]): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-config''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-config''') except HTTPError: pass def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37) config.push_to_hub('''test-config''' , use_auth_token=self._token) UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) # Reset repo delete_repo(token=self._token , repo_id='''test-config''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_snake_case , repo_id='''test-config''' , push_to_hub=_snake_case , use_auth_token=self._token) UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = BertConfig( vocab_size=99 , 
hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37) config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token) UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-config-org''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( _snake_case , repo_id='''valid_org/test-config-org''' , push_to_hub=_snake_case , use_auth_token=self._token) UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_snake_case , getattr(_snake_case , _snake_case)) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" CustomConfig.register_for_auto_class() UpperCAmelCase_ = CustomConfig(attribute=42) config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''}) UpperCAmelCase_ = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=_snake_case) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''') self.assertEqual(new_config.attribute , 42) class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated UpperCAmelCase_ = c.n_embd + 1 # int UpperCAmelCase_ = c.resid_pdrop + 1.0 # float UpperCAmelCase_ = not c.scale_attn_weights # bool UpperCAmelCase_ = c.summary_type + '''foo''' # str c.update_from_string( F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""") self.assertEqual(_snake_case , c.n_embd , '''mismatch for key: n_embd''') self.assertEqual(_snake_case , c.resid_pdrop , '''mismatch for key: resid_pdrop''') self.assertEqual(_snake_case , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''') self.assertEqual(_snake_case , c.summary_type , '''mismatch for key: summary_type''') def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = PretrainedConfig() UpperCAmelCase_ = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( _snake_case , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version''']) UpperCAmelCase_ = [key for key, value in config_common_kwargs.items() if value == getattr(_snake_case , _snake_case)] if len(_snake_case) > 0: raise ValueError( '''The following keys are set with the default values in''' ''' `test_configuration_common.config_common_kwargs`, pick another value for them:''' F""" {", ".join(_snake_case)}.""") def lowerCamelCase ( self : str): """simple docstring""" with self.assertRaises(_snake_case): # config is in subfolder, the following should not work without specifying the subfolder UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''') UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''') self.assertIsNotNone(_snake_case) def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = mock.Mock() UpperCAmelCase_ = 500 UpperCAmelCase_ = {} UpperCAmelCase_ = HTTPError UpperCAmelCase_ = {} # Download this model to make sure it's in the cache. UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''') # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_snake_case) as mock_head: UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''') # This checks that we did call the fake head request mock_head.assert_called() def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = BertConfig.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''') def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = AutoConfig.from_pretrained('''bert-base-cased''') UpperCAmelCase_ = ['''config.4.0.0.json'''] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(_snake_case) UpperCAmelCase_ = 2 json.dump(configuration.to_dict() , open(os.path.join(_snake_case , '''config.4.0.0.json''') , '''w''')) # This should pick the new configuration file as the version of Transformers is > 4.0.0 UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) self.assertEqual(new_configuration.hidden_size , 2) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 UpperCAmelCase_ = ['''config.42.0.0.json'''] UpperCAmelCase_ = 768 configuration.save_pretrained(_snake_case) shutil.move(os.path.join(_snake_case , '''config.4.0.0.json''') , os.path.join(_snake_case , '''config.42.0.0.json''')) UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) self.assertEqual(new_configuration.hidden_size , 768) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''hf-internal-testing/test-two-configs''' import transformers as new_transformers UpperCAmelCase_ = '''v4.0.0''' UpperCAmelCase_ , UpperCAmelCase_ = new_transformers.models.auto.AutoConfig.from_pretrained( _snake_case , return_unused_kwargs=_snake_case) self.assertEqual(new_configuration.hidden_size , 2) # This checks `_configuration_file` is not kept in the kwargs by mistake. self.assertDictEqual(_snake_case , {}) # Testing an older version by monkey-patching the version in the module it's used. 
import transformers as old_transformers UpperCAmelCase_ = '''v3.0.0''' UpperCAmelCase_ = old_transformers.models.auto.AutoConfig.from_pretrained(_snake_case) self.assertEqual(old_configuration.hidden_size , 768)
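# Illustrative sketch (not part of the test file above): a minimal parser for
# the comma-separated override string passed to `update_from_string`. The
# helper name `parse_overrides` is hypothetical; the real
# `PretrainedConfig.update_from_string` additionally casts each value to the
# type of the existing attribute.
def parse_overrides(override_string: str) -> dict:
    overrides = {}
    for assignment in override_string.split(","):
        key, value = assignment.split("=", 1)
        overrides[key.strip()] = value.strip()
    return overrides

assert parse_overrides("n_embd=769,scale_attn_weights=False") == {
    "n_embd": "769",
    "scale_attn_weights": "False",
}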
7
0
def A (__A : int | float | str ) -> tuple[int, int]: """simple docstring""" try: UpperCAmelCase_ = float(__A ) except ValueError: raise ValueError('''Please enter a valid number''' ) UpperCAmelCase_ = decimal - int(__A ) if fractional_part == 0: return int(__A ), 1 else: UpperCAmelCase_ = len(str(__A ).split('''.''' )[1] ) UpperCAmelCase_ = int(decimal * (10**number_of_frac_digits) ) UpperCAmelCase_ = 10**number_of_frac_digits UpperCAmelCase_ , UpperCAmelCase_ = denominator, numerator while True: UpperCAmelCase_ = dividend % divisor if remainder == 0: break UpperCAmelCase_ , UpperCAmelCase_ = divisor, remainder UpperCAmelCase_ , UpperCAmelCase_ = numerator / divisor, denominator / divisor return int(__A ), int(__A ) if __name__ == "__main__": print(f"{decimal_to_fraction(2) = }") print(f"{decimal_to_fraction(89.0) = }") print(f"{decimal_to_fraction('67') = }") print(f"{decimal_to_fraction('45.0') = }") print(f"{decimal_to_fraction(1.5) = }") print(f"{decimal_to_fraction('6.25') = }") print(f"{decimal_to_fraction('78td') = }")
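# Hedged aside: the Euclidean reduction above can be cross-checked against the
# standard library, since `fractions.Fraction` performs the same
# decimal-to-ratio reduction. This is an illustration only, not part of the
# function above.
from fractions import Fraction

assert Fraction("6.25").as_integer_ratio() == (25, 4)
assert Fraction("45.0").as_integer_ratio() == (45, 1)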
362
from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass snake_case_ : List[Any] = (3, 9, -11, 0, 7, 5, 1, -1) snake_case_ : str = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class __snake_case : UpperCAmelCase__ : int UpperCAmelCase__ : Node | None class __snake_case : def __init__( self : Optional[int] , _snake_case : Iterable[int]): """simple docstring""" UpperCAmelCase_ = None for i in sorted(_snake_case , reverse=_snake_case): UpperCAmelCase_ = Node(_snake_case , self.head) def __iter__( self : Dict): """simple docstring""" UpperCAmelCase_ = self.head while node: yield node.data UpperCAmelCase_ = node.next_node def __len__( self : int): """simple docstring""" return sum(1 for _ in self) def __str__( self : Optional[Any]): """simple docstring""" return " -> ".join([str(_snake_case) for node in self]) def A (__A : SortedLinkedList , __A : SortedLinkedList ) -> SortedLinkedList: """simple docstring""" return SortedLinkedList(list(__A ) + list(__A ) ) if __name__ == "__main__": import doctest doctest.testmod() snake_case_ : Union[str, Any] = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
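# Hedged aside: `merge_lists` above re-sorts the concatenation, which costs
# O((m+n) log(m+n)). Since both inputs are already sorted, the standard
# library's `heapq.merge` achieves the same result in linear time; shown here
# as a design alternative, not as what the module above does.
import heapq

assert list(heapq.merge([-11, 1, 5, 9], [-2, 0, 4, 10])) == [-11, -2, 0, 1, 4, 5, 9, 10]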
7
0
from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def A (__A : Any ) -> Any: """simple docstring""" return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def A () -> List[Any]: """simple docstring""" UpperCAmelCase_ = ArgumentParser( '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=__A ) UpperCAmelCase_ = parser.add_subparsers(help='''datasets-cli command helpers''' ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(__A ) EnvironmentCommand.register_subcommand(__A ) TestCommand.register_subcommand(__A ) RunBeamCommand.register_subcommand(__A ) DummyDataCommand.register_subcommand(__A ) # Parse args UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_known_args() if not hasattr(__A , '''func''' ): parser.print_help() exit(1 ) UpperCAmelCase_ = parse_unknown_args(__A ) # Run UpperCAmelCase_ = args.func(__A , **__A ) service.run() if __name__ == "__main__": main()
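# Worked example (illustrative): the unknown-argument parser above pairs flags
# with values purely by position, so every flag must be followed by exactly
# one value.
unknown_args = ["--foo", "1", "--bar", "2"]
parsed = {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
assert parsed == {"foo": "1", "bar": "2"}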
363
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig snake_case_ : Union[str, Any] = logging.get_logger(__name__) class __snake_case : def __init__( self : int , _snake_case : List[Any] , _snake_case : Tuple): """simple docstring""" UpperCAmelCase_ = question_encoder UpperCAmelCase_ = generator UpperCAmelCase_ = self.question_encoder def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int]): """simple docstring""" if os.path.isfile(_snake_case): raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""") os.makedirs(_snake_case , exist_ok=_snake_case) UpperCAmelCase_ = os.path.join(_snake_case , '''question_encoder_tokenizer''') UpperCAmelCase_ = os.path.join(_snake_case , '''generator_tokenizer''') self.question_encoder.save_pretrained(_snake_case) self.generator.save_pretrained(_snake_case) @classmethod def lowerCamelCase ( cls : Optional[Any] , _snake_case : Optional[Any] , **_snake_case : Optional[int]): """simple docstring""" from ..auto.tokenization_auto import AutoTokenizer UpperCAmelCase_ = kwargs.pop('''config''' , _snake_case) if config is None: UpperCAmelCase_ = RagConfig.from_pretrained(_snake_case) UpperCAmelCase_ = AutoTokenizer.from_pretrained( _snake_case , config=config.question_encoder , subfolder='''question_encoder_tokenizer''') UpperCAmelCase_ = AutoTokenizer.from_pretrained( _snake_case , config=config.generator , subfolder='''generator_tokenizer''') return cls(question_encoder=_snake_case , generator=_snake_case) def __call__( self : List[Any] , *_snake_case : List[str] , **_snake_case : List[Any]): """simple docstring""" return self.current_tokenizer(*_snake_case , **_snake_case) def lowerCamelCase ( self : List[Any] , *_snake_case : str , **_snake_case : Union[str, Any]): """simple docstring""" return self.generator.batch_decode(*_snake_case , **_snake_case) def lowerCamelCase ( self : str , *_snake_case : Optional[int] , **_snake_case : Any): """simple docstring""" return self.generator.decode(*_snake_case , **_snake_case) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = self.question_encoder def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = self.generator def lowerCamelCase ( self : Optional[Any] , _snake_case : List[str] , _snake_case : Optional[List[str]] = None , _snake_case : Optional[int] = None , _snake_case : Optional[int] = None , _snake_case : str = "longest" , _snake_case : str = None , _snake_case : bool = True , **_snake_case : Optional[int] , ): """simple docstring""" warnings.warn( '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ''' '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ''' '''context manager to prepare your targets. 
See the documentation of your specific tokenizer for more ''' '''details''' , _snake_case , ) if max_length is None: UpperCAmelCase_ = self.current_tokenizer.model_max_length UpperCAmelCase_ = self( _snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , max_length=_snake_case , padding=_snake_case , truncation=_snake_case , **_snake_case , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: UpperCAmelCase_ = self.current_tokenizer.model_max_length UpperCAmelCase_ = self( text_target=_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , **_snake_case , ) UpperCAmelCase_ = labels['''input_ids'''] return model_inputs
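# Minimal sketch of the "current tokenizer" switching pattern used by the
# class above, with assumption-free stand-ins instead of real tokenizers: the
# wrapper delegates calls to whichever sub-tokenizer is currently active.
class _TokenizerSwitch:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = question_encoder

    def __call__(self, text):
        return self.current_tokenizer(text)

switch = _TokenizerSwitch(str.upper, str.lower)
assert switch("Rag") == "RAG"          # question-encoder mode
switch.current_tokenizer = switch.generator
assert switch("Rag") == "rag"          # generator mode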
7
0
import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING snake_case_ : Optional[Any] = logging.get_logger(__name__) class __snake_case ( enum.Enum ): UpperCAmelCase__ : int = 0 UpperCAmelCase__ : Optional[int] = 1 @add_end_docstrings(a ) class __snake_case ( a ): UpperCAmelCase__ : Dict = '''generated''' def __init__( self : Optional[int] , *_snake_case : Optional[Any] , **_snake_case : Tuple): """simple docstring""" super().__init__(*_snake_case , **_snake_case) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING) def lowerCamelCase ( self : Optional[int] , _snake_case : Optional[int]=None , _snake_case : Optional[int]=None , _snake_case : Tuple=None , _snake_case : Tuple=None , _snake_case : Any=None , _snake_case : Optional[int]=None , **_snake_case : str , ): """simple docstring""" UpperCAmelCase_ = {} if truncation is not None: UpperCAmelCase_ = truncation UpperCAmelCase_ = generate_kwargs UpperCAmelCase_ = {} if return_tensors is not None and return_type is None: UpperCAmelCase_ = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: UpperCAmelCase_ = return_type if clean_up_tokenization_spaces is not None: UpperCAmelCase_ = clean_up_tokenization_spaces if stop_sequence is not None: UpperCAmelCase_ = self.tokenizer.encode(_snake_case , add_special_tokens=_snake_case) if len(_snake_case) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''') UpperCAmelCase_ = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def lowerCamelCase ( self : List[str] , _snake_case : int , _snake_case : int , _snake_case : int): """simple docstring""" return True def lowerCamelCase ( self : Union[str, Any] , *_snake_case : Union[str, Any] , _snake_case : Optional[Any]): """simple docstring""" UpperCAmelCase_ = self.model.config.prefix if self.model.config.prefix is not None else '''''' if isinstance(args[0] , _snake_case): if self.tokenizer.pad_token_id is None: raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''') UpperCAmelCase_ = ([prefix + arg for arg in args[0]],) UpperCAmelCase_ = True elif isinstance(args[0] , _snake_case): UpperCAmelCase_ = (prefix + args[0],) UpperCAmelCase_ = False else: raise ValueError( F""" `args[0]`: {args[0]} has the wrong format. 
It should be either of type `str` or of type `list`""") UpperCAmelCase_ = self.tokenizer(*_snake_case , padding=_snake_case , truncation=_snake_case , return_tensors=self.framework) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self : Optional[Any] , *_snake_case : str , **_snake_case : Any): """simple docstring""" UpperCAmelCase_ = super().__call__(*_snake_case , **_snake_case) if ( isinstance(args[0] , _snake_case) and all(isinstance(_snake_case , _snake_case) for el in args[0]) and all(len(_snake_case) == 1 for res in result) ): return [res[0] for res in result] return result def lowerCamelCase ( self : Tuple , _snake_case : Any , _snake_case : Dict=TruncationStrategy.DO_NOT_TRUNCATE , **_snake_case : Tuple): """simple docstring""" UpperCAmelCase_ = self._parse_and_tokenize(_snake_case , truncation=_snake_case , **_snake_case) return inputs def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , **_snake_case : List[str]): """simple docstring""" if self.framework == "pt": UpperCAmelCase_ , UpperCAmelCase_ = model_inputs['''input_ids'''].shape elif self.framework == "tf": UpperCAmelCase_ , UpperCAmelCase_ = tf.shape(model_inputs['''input_ids''']).numpy() UpperCAmelCase_ = generate_kwargs.get('''min_length''' , self.model.config.min_length) UpperCAmelCase_ = generate_kwargs.get('''max_length''' , self.model.config.max_length) self.check_inputs(_snake_case , generate_kwargs['''min_length'''] , generate_kwargs['''max_length''']) UpperCAmelCase_ = self.model.generate(**_snake_case , **_snake_case) UpperCAmelCase_ = output_ids.shape[0] if self.framework == "pt": UpperCAmelCase_ = output_ids.reshape(_snake_case , out_b // in_b , *output_ids.shape[1:]) elif self.framework == "tf": UpperCAmelCase_ = tf.reshape(_snake_case , (in_b, out_b // in_b, *output_ids.shape[1:])) return {"output_ids": output_ids} def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any] , _snake_case : Any=ReturnType.TEXT , _snake_case : Dict=False): """simple docstring""" UpperCAmelCase_ = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: UpperCAmelCase_ = {F"""{self.return_name}_token_ids""": output_ids} elif return_type == ReturnType.TEXT: UpperCAmelCase_ = { F"""{self.return_name}_text""": self.tokenizer.decode( _snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case , ) } records.append(_snake_case) return records @add_end_docstrings(a ) class __snake_case ( a ): UpperCAmelCase__ : Union[str, Any] = '''summary''' def __call__( self : Dict , *_snake_case : Dict , **_snake_case : Dict): """simple docstring""" return super().__call__(*_snake_case , **_snake_case) def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : int , _snake_case : int): """simple docstring""" if max_length < min_length: logger.warning(F"""Your min_length={min_length} must be smaller than your max_length={max_length}.""") if input_length < max_length: logger.warning( F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """ '''a summarization task, where outputs shorter than the input are typically wanted, you might ''' F"""consider decreasing max_length manually, e.g. 
summarizer('...', max_length={input_length//2})""") @add_end_docstrings(a ) class __snake_case ( a ): UpperCAmelCase__ : Optional[Any] = '''translation''' def lowerCamelCase ( self : Any , _snake_case : int , _snake_case : int , _snake_case : int): """simple docstring""" if input_length > 0.9 * max_length: logger.warning( F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """ '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''') return True def lowerCamelCase ( self : Any , *_snake_case : Any , _snake_case : Optional[Any]=TruncationStrategy.DO_NOT_TRUNCATE , _snake_case : int=None , _snake_case : str=None): """simple docstring""" if getattr(self.tokenizer , '''_build_translation_inputs''' , _snake_case): return self.tokenizer._build_translation_inputs( *_snake_case , return_tensors=self.framework , truncation=_snake_case , src_lang=_snake_case , tgt_lang=_snake_case) else: return super()._parse_and_tokenize(*_snake_case , truncation=_snake_case) def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]=None , _snake_case : Dict=None , **_snake_case : List[Any]): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = super()._sanitize_parameters(**_snake_case) if src_lang is not None: UpperCAmelCase_ = src_lang if tgt_lang is not None: UpperCAmelCase_ = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. UpperCAmelCase_ = kwargs.get('''task''' , self.task) UpperCAmelCase_ = task.split('''_''') if task and len(_snake_case) == 4: # translation, XX, to YY UpperCAmelCase_ = items[1] UpperCAmelCase_ = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self : List[Any] , *_snake_case : List[str] , **_snake_case : Optional[Any]): """simple docstring""" return super().__call__(*_snake_case , **_snake_case)
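# Worked example (illustrative): how a task name of the form
# "translation_XX_to_YY" decomposes into source and target languages, as in
# `_sanitize_parameters` above.
items = "translation_en_to_fr".split("_")
assert len(items) == 4          # ["translation", "en", "to", "fr"]
src_lang, tgt_lang = items[1], items[3]
assert (src_lang, tgt_lang) == ("en", "fr")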
364
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __snake_case ( unittest.TestCase ): @slow def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-base''') UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) # The dog is cute and lives in the garden house UpperCAmelCase_ = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim UpperCAmelCase_ = torch.tensor( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach() self.assertEqual(output.shape , _snake_case) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3)) @slow def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-large''') UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) # The dog is cute and lives in the garden house UpperCAmelCase_ = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim UpperCAmelCase_ = torch.tensor( [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach() self.assertEqual(output.shape , _snake_case) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3))
7
0
"""simple docstring""" import argparse import json import subprocess def A (__A : List[str] , __A : Dict ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ = [] UpperCAmelCase_ = ( F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\"""" ''' https://api.github.com/repos/huggingface/transformers/actions/runners''' ) UpperCAmelCase_ = subprocess.run(__A , shell=__A , stdout=subprocess.PIPE ) UpperCAmelCase_ = output.stdout.decode('''utf-8''' ) UpperCAmelCase_ = json.loads(__A ) UpperCAmelCase_ = status['''runners'''] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(__A ) # save the result so we can report them on Slack with open('''offline_runners.txt''' , '''w''' ) as fp: fp.write(json.dumps(__A ) ) if len(__A ) > 0: UpperCAmelCase_ = '''\n'''.join([x['''name'''] for x in offline_runners] ) raise ValueError(F"""The following runners are offline:\n{failed}""" ) if __name__ == "__main__": def A (__A : Optional[int] ) -> List[Any]: """simple docstring""" return values.split(''',''' ) snake_case_ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--target_runners", default=None, type=list_str, required=True, help="Comma-separated list of runners to check status.", ) parser.add_argument( "--token", default=None, type=str, required=True, help="A token that has actions:read permission." ) snake_case_ : Union[str, Any] = parser.parse_args() get_runner_status(args.target_runners, args.token)
365
from maths.prime_factors import prime_factors def A (__A : int ) -> int: """simple docstring""" if not isinstance(__A , __A ): UpperCAmelCase_ = F"""Input value of [number={number}] must be an integer""" raise TypeError(__A ) if number < 1: raise ValueError('''Input must be a positive integer''' ) return -1 if len(prime_factors(__A ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
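# Hedged sketch of the dependency: a minimal trial-division `prime_factors`
# returning factors with multiplicity, which is what the parity test above
# appears to assume (the real implementation lives in `maths.prime_factors`).
def _prime_factors(n: int) -> list:
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors

assert _prime_factors(12) == [2, 2, 3]   # 3 factors (odd)  -> lambda = -1
assert _prime_factors(10) == [2, 5]      # 2 factors (even) -> lambda = 1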
7
0
"""simple docstring""" import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : Optional[Any] = FlaxAutoencoderKL @property def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = 4 UpperCAmelCase_ = 3 UpperCAmelCase_ = (32, 32) UpperCAmelCase_ = jax.random.PRNGKey(0) UpperCAmelCase_ = jax.random.uniform(_snake_case , ((batch_size, num_channels) + sizes)) return {"sample": image, "prng_key": prng_key} def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = { '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } UpperCAmelCase_ = self.dummy_input return init_dict, inputs_dict
366
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any]): """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']): UpperCAmelCase_ = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_snake_case) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''') def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) # set architectures equal to `None` UpperCAmelCase_ = None UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Dict): """simple 
docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tinier_bart''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tinier_bart''' UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case) UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config]) UpperCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ = PyTorchBenchmarkArguments( 
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) benchmark.run() self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists()) self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists()) def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_snake_case : Tuple): self.assertTrue(hasattr(_snake_case , '''sequential''')) self.assertTrue(hasattr(_snake_case , '''cumulative''')) self.assertTrue(hasattr(_snake_case , '''current''')) self.assertTrue(hasattr(_snake_case , '''total''')) with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , ) UpperCAmelCase_ = PyTorchBenchmark(_snake_case) UpperCAmelCase_ = benchmark.run() _check_summary_is_not_empty(result.inference_summary) _check_summary_is_not_empty(result.train_summary) self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
7
0
def A (__A : list , __A : int = 0 ) -> list: """simple docstring""" UpperCAmelCase_ = length or len(__A ) UpperCAmelCase_ = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: UpperCAmelCase_ , UpperCAmelCase_ = list_data[i + 1], list_data[i] UpperCAmelCase_ = True return list_data if not swapped else bubble_sort(__A , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
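# Hedged aside: an iterative equivalent of the recursive bubble sort above,
# kept separate so the original stays untouched. Both shrink the unsorted
# window by one per pass and stop early on a swap-free pass.
def bubble_sort_iterative(values: list) -> list:
    length = len(values)
    while length > 1:
        swapped = False
        for i in range(length - 1):
            if values[i] > values[i + 1]:
                values[i], values[i + 1] = values[i + 1], values[i]
                swapped = True
        if not swapped:
            break
        length -= 1
    return values

assert bubble_sort_iterative([4, 1, 3, 2]) == [1, 2, 3, 4]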
367
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def A (__A : BertModel , __A : str , __A : str ) -> int: """simple docstring""" UpperCAmelCase_ = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''') UpperCAmelCase_ = ( ('''layer.''', '''layer_'''), ('''word_embeddings.weight''', '''word_embeddings'''), ('''position_embeddings.weight''', '''position_embeddings'''), ('''token_type_embeddings.weight''', '''token_type_embeddings'''), ('''.''', '''/'''), ('''LayerNorm/weight''', '''LayerNorm/gamma'''), ('''LayerNorm/bias''', '''LayerNorm/beta'''), ('''weight''', '''kernel'''), ) if not os.path.isdir(__A ): os.makedirs(__A ) UpperCAmelCase_ = model.state_dict() def to_tf_var_name(__A : str ): for patt, repl in iter(__A ): UpperCAmelCase_ = name.replace(__A , __A ) return F"""bert/{name}""" def create_tf_var(__A : np.ndarray , __A : str , __A : tf.Session ): UpperCAmelCase_ = tf.dtypes.as_dtype(tensor.dtype ) UpperCAmelCase_ = tf.get_variable(dtype=__A , shape=tensor.shape , name=__A , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__A ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: UpperCAmelCase_ = to_tf_var_name(__A ) UpperCAmelCase_ = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): UpperCAmelCase_ = torch_tensor.T UpperCAmelCase_ = create_tf_var(tensor=__A , name=__A , session=__A ) tf.keras.backend.set_value(__A , __A ) UpperCAmelCase_ = session.run(__A ) print(F"""Successfully created {tf_name}: {np.allclose(__A , __A )}""" ) UpperCAmelCase_ = tf.train.Saver(tf.trainable_variables() ) saver.save(__A , os.path.join(__A , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) ) def A (__A : Any=None ) -> str: """simple docstring""" UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--model_name''' , type=__A , required=__A , help='''model name e.g. bert-base-uncased''' ) parser.add_argument( '''--cache_dir''' , type=__A , default=__A , required=__A , help='''Directory containing pytorch model''' ) parser.add_argument('''--pytorch_model_path''' , type=__A , required=__A , help='''/path/to/<pytorch-model-name>.bin''' ) parser.add_argument('''--tf_cache_dir''' , type=__A , required=__A , help='''Directory in which to save tensorflow model''' ) UpperCAmelCase_ = parser.parse_args(__A ) UpperCAmelCase_ = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__A , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
7
0
from typing import Dict from .base import GenericTensor, Pipeline class __snake_case ( a ): def lowerCamelCase ( self : str , _snake_case : List[Any]=None , _snake_case : Optional[int]=None , _snake_case : Any=None , **_snake_case : Dict): """simple docstring""" if tokenize_kwargs is None: UpperCAmelCase_ = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''') UpperCAmelCase_ = truncation UpperCAmelCase_ = tokenize_kwargs UpperCAmelCase_ = {} if return_tensors is not None: UpperCAmelCase_ = return_tensors return preprocess_params, {}, postprocess_params def lowerCamelCase ( self : List[str] , _snake_case : Tuple , **_snake_case : Dict): """simple docstring""" UpperCAmelCase_ = self.framework UpperCAmelCase_ = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case) return model_inputs def lowerCamelCase ( self : Tuple , _snake_case : Optional[int]): """simple docstring""" UpperCAmelCase_ = self.model(**_snake_case) return model_outputs def lowerCamelCase ( self : List[str] , _snake_case : Any , _snake_case : int=False): """simple docstring""" if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : Any , *_snake_case : int , **_snake_case : List[Any]): """simple docstring""" return super().__call__(*_snake_case , **_snake_case)
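# Minimal sketch of the postprocess step above with a stand-in tensor: when
# `return_tensors` is falsy and the framework is PyTorch, the first model
# output is converted to nested Python lists of shape
# [batch][tokens][hidden_size].
import torch

model_outputs = (torch.zeros(1, 2, 3),)
features = model_outputs[0].tolist()
assert features == [[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]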
368
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __snake_case ( unittest.TestCase ): def __init__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict=3 , _snake_case : Dict=32 , _snake_case : List[str]=3 , _snake_case : Union[str, Any]=10 , _snake_case : Tuple=[10, 20, 30, 40] , _snake_case : Dict=[1, 1, 2, 1] , _snake_case : List[Any]=True , _snake_case : Dict=True , _snake_case : Union[str, Any]="relu" , _snake_case : Tuple=3 , _snake_case : Union[str, Any]=None , ): """simple docstring""" UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = image_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = embeddings_size UpperCAmelCase_ = hidden_sizes UpperCAmelCase_ = depths UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = hidden_act UpperCAmelCase_ = num_labels UpperCAmelCase_ = scope UpperCAmelCase_ = len(_snake_case) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) UpperCAmelCase_ = self.get_config() return config, pixel_values def lowerCamelCase ( self : List[Any]): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any] , _snake_case : Optional[int]): """simple docstring""" UpperCAmelCase_ = FlaxRegNetModel(config=_snake_case) UpperCAmelCase_ = model(_snake_case) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : Optional[int]): """simple docstring""" UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = FlaxRegNetForImageClassification(config=_snake_case) UpperCAmelCase_ = model(_snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs UpperCAmelCase_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () UpperCAmelCase__ : Tuple = False UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : int = False def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ = FlaxRegNetModelTester(self) UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case) def lowerCamelCase ( self : List[Any]): """simple docstring""" 
self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self : List[str]): """simple docstring""" return def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case) @unittest.skip(reason='''RegNet does not use inputs_embeds''') def lowerCamelCase ( self : Optional[Any]): """simple docstring""" pass @unittest.skip(reason='''RegNet does not support input and output embeddings''') def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" pass def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(_snake_case) UpperCAmelCase_ = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _snake_case) def lowerCamelCase ( self : Optional[int]): """simple docstring""" def check_hidden_states_output(_snake_case : List[str] , _snake_case : Dict , _snake_case : List[str]): UpperCAmelCase_ = model_class(_snake_case) UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case)) UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ = self.model_tester.num_stages self.assertEqual(len(_snake_case) , expected_num_stages + 1) UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case) def lowerCamelCase ( self : Optional[Any]): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): UpperCAmelCase_ = self._prepare_for_class(_snake_case , _snake_case) UpperCAmelCase_ = model_class(_snake_case) @jax.jit def model_jitted(_snake_case : str , **_snake_case : Union[str, Any]): return model(pixel_values=_snake_case , **_snake_case) with self.subTest('''JIT Enabled'''): UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple() with self.subTest('''JIT Disabled'''): with jax.disable_jit(): UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple() self.assertEqual(len(_snake_case) , len(_snake_case)) for jitted_output, output in zip(_snake_case , _snake_case): self.assertEqual(jitted_output.shape , output.shape) def A () -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ = 
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_flax class __snake_case ( unittest.TestCase ): @cached_property def lowerCamelCase ( self : Dict): """simple docstring""" return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''') if is_vision_available() else None @slow def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''') UpperCAmelCase_ = self.default_image_processor UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = image_processor(images=_snake_case , return_tensors='''np''') UpperCAmelCase_ = model(**_snake_case) # verify the logits UpperCAmelCase_ = (1, 1000) self.assertEqual(outputs.logits.shape , _snake_case) UpperCAmelCase_ = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6]) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4))
7
0
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch snake_case_ : Optional[int] = logging.get_logger(__name__) @add_end_docstrings( a , r''' top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). ''' , ) class __snake_case ( a ): def lowerCamelCase ( self : Any , _snake_case : GenericTensor): """simple docstring""" if self.framework == "tf": UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id).numpy() elif self.framework == "pt": UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_snake_case) else: raise ValueError('''Unsupported framework''') return masked_index def lowerCamelCase ( self : Optional[Any] , _snake_case : GenericTensor): """simple docstring""" UpperCAmelCase_ = self.get_masked_index(_snake_case) UpperCAmelCase_ = np.prod(masked_index.shape) if numel < 1: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , ) def lowerCamelCase ( self : List[Any] , _snake_case : GenericTensor): """simple docstring""" if isinstance(_snake_case , _snake_case): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0]) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(_snake_case) def lowerCamelCase ( self : List[str] , _snake_case : str , _snake_case : Union[str, Any]=None , **_snake_case : Dict): """simple docstring""" if return_tensors is None: UpperCAmelCase_ = self.framework UpperCAmelCase_ = self.tokenizer(_snake_case , return_tensors=_snake_case) self.ensure_exactly_one_mask_token(_snake_case) return model_inputs def lowerCamelCase ( self : Tuple , _snake_case : Any): """simple docstring""" UpperCAmelCase_ = self.model(**_snake_case) UpperCAmelCase_ = model_inputs['''input_ids'''] return model_outputs def lowerCamelCase ( self : Any , _snake_case : int , _snake_case : Union[str, Any]=5 , _snake_case : Optional[int]=None): """simple docstring""" if target_ids is not None and target_ids.shape[0] < top_k: UpperCAmelCase_ = target_ids.shape[0] UpperCAmelCase_ = model_outputs['''input_ids'''][0] UpperCAmelCase_ = model_outputs['''logits'''] if self.framework == "tf": UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0] UpperCAmelCase_ = outputs.numpy() UpperCAmelCase_ = outputs[0, masked_index, :] UpperCAmelCase_ = stable_softmax(_snake_case , axis=-1) if target_ids is not None: UpperCAmelCase_ = tf.gather_nd(tf.squeeze(_snake_case , 0) , target_ids.reshape(-1 , 1)) UpperCAmelCase_ = tf.expand_dims(_snake_case , 0) UpperCAmelCase_ = tf.math.top_k(_snake_case , k=_snake_case) UpperCAmelCase_ , UpperCAmelCase_ = topk.values.numpy(), topk.indices.numpy() else: UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_snake_case).squeeze(-1) # Fill mask pipeline supports only one ${mask_token} per sample UpperCAmelCase_ = 
outputs[0, masked_index, :] UpperCAmelCase_ = logits.softmax(dim=-1) if target_ids is not None: UpperCAmelCase_ = probs[..., target_ids] UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(_snake_case) UpperCAmelCase_ = [] UpperCAmelCase_ = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist())): UpperCAmelCase_ = [] for v, p in zip(_values , _predictions): # Copy is important since we're going to modify this array in place UpperCAmelCase_ = input_ids.numpy().copy() if target_ids is not None: UpperCAmelCase_ = target_ids[p].tolist() UpperCAmelCase_ = p # Filter padding out: UpperCAmelCase_ = tokens[np.where(tokens != self.tokenizer.pad_token_id)] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case) UpperCAmelCase_ = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p]), '''sequence''': sequence} row.append(_snake_case) result.append(_snake_case) if single_mask: return result[0] return result def lowerCamelCase ( self : Union[str, Any] , _snake_case : List[Any] , _snake_case : Optional[Any]=None): """simple docstring""" if isinstance(_snake_case , _snake_case): UpperCAmelCase_ = [targets] try: UpperCAmelCase_ = self.tokenizer.get_vocab() except Exception: UpperCAmelCase_ = {} UpperCAmelCase_ = [] for target in targets: UpperCAmelCase_ = vocab.get(_snake_case , _snake_case) if id_ is None: UpperCAmelCase_ = self.tokenizer( _snake_case , add_special_tokens=_snake_case , return_attention_mask=_snake_case , return_token_type_ids=_snake_case , max_length=1 , truncation=_snake_case , )['''input_ids'''] if len(_snake_case) == 0: logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ '''We cannot replace it with anything meaningful, ignoring it''') continue UpperCAmelCase_ = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.""") target_ids.append(id_) UpperCAmelCase_ = list(set(_snake_case)) if len(_snake_case) == 0: raise ValueError('''At least one target must be provided when passed.''') UpperCAmelCase_ = np.array(_snake_case) return target_ids def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int]=None , _snake_case : List[Any]=None): """simple docstring""" UpperCAmelCase_ = {} if targets is not None: UpperCAmelCase_ = self.get_target_ids(_snake_case , _snake_case) UpperCAmelCase_ = target_ids if top_k is not None: UpperCAmelCase_ = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''') return {}, {}, postprocess_params def __call__( self : str , _snake_case : Dict , *_snake_case : Tuple , **_snake_case : str): """simple docstring""" UpperCAmelCase_ = super().__call__(_snake_case , **_snake_case) if isinstance(_snake_case , _snake_case) and len(_snake_case) == 1: return outputs[0] return outputs
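# Minimal numeric sketch of the scoring step above: softmax over a masked
# position's logits, then top-k. The numbers are made up for illustration.
import numpy as np

logits = np.array([2.0, 1.0, 0.1])
probs = np.exp(logits) / np.exp(logits).sum()
top_2 = np.argsort(probs)[::-1][:2]
assert list(top_2) == [0, 1]
assert abs(probs.sum() - 1.0) < 1e-9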
369
import comet # From: unbabel-comet import torch import datasets snake_case_ : Tuple = datasets.logging.get_logger(__name__) snake_case_ : str = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n" snake_case_ : Tuple = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n" snake_case_ : Optional[int] = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. 
Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): def lowerCamelCase ( self : Any): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''sources''': datasets.Value('''string''' , id='''sequence'''), '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Value('''string''' , id='''sequence'''), }) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[ '''https://github.com/Unbabel/COMET''', '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''', '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''', ] , ) def lowerCamelCase ( self : List[Any] , _snake_case : Optional[int]): """simple docstring""" if self.config_name == "default": UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''')) else: UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name)) def lowerCamelCase ( self : List[Any] , _snake_case : str , _snake_case : List[str] , _snake_case : Tuple , _snake_case : int=None , _snake_case : Optional[Any]=False): """simple docstring""" if gpus is None: UpperCAmelCase_ = 1 if torch.cuda.is_available() else 0 UpperCAmelCase_ = {'''src''': sources, '''mt''': predictions, '''ref''': references} UpperCAmelCase_ = [dict(zip(_snake_case , _snake_case)) for t in zip(*data.values())] UpperCAmelCase_ , UpperCAmelCase_ = self.scorer.predict(_snake_case , gpus=_snake_case , progress_bar=_snake_case) return {"mean_score": mean_score, "scores": scores}
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
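# A hypothetical migration sketch for downstream code (the checkpoint name below is
# only an illustrative, commonly used CLIP checkpoint, not one pinned by this file):
#
#     from transformers import CLIPImageProcessor
#
#     image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#
# The deprecated CLIPFeatureExtractor keeps working as a thin alias that emits the
# FutureWarning above; new code should construct CLIPImageProcessor directly.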
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
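# A minimal standalone sketch of the denoising loop that `full_loop` above exercises,
# with a zero tensor standing in for a real noise-prediction model:
#
#     import torch
#     from diffusers import DPMSolverSinglestepScheduler
#
#     scheduler = DPMSolverSinglestepScheduler()
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8)
#     for t in scheduler.timesteps:
#         residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
#         sample = scheduler.step(residual, t, sample).prev_sample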
from collections import deque
from math import floor
from random import random
from time import time

# the default weight is 1 if not assigned but all the implementation is weighted


class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if u does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
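# A short usage sketch of the classes above (not part of the original module); the
# printed values follow directly from the adjacency lists built by add_pair:
if __name__ == "__main__":
    dg = DirectedGraph()
    dg.add_pair(0, 1)
    dg.add_pair(1, 2)
    dg.add_pair(2, 0)
    print(dg.dfs(0))        # [0, 1, 2]
    print(dg.bfs(0))        # [0, 1, 2]
    print(dg.in_degree(0))  # 1, via the single incoming edge 2 -> 0
    print(dg.out_degree(0))  # 1, via the single outgoing edge 0 -> 1

    ug = Graph()
    ug.add_pair("a", "b", w=3)
    print(ug.degree("a"))   # 1: "a" has a single weighted neighbour, "b"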
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
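# `_LazyModule` above defers the heavy framework imports until an attribute is first
# accessed. A simplified, illustrative re-implementation of that idea (an assumption
# for exposition, not the real transformers class):
#
#     import importlib
#     import types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # map each exported name to the submodule that defines it
#             self._name_to_module = {
#                 attr: submodule
#                 for submodule, attrs in import_structure.items()
#                 for attr in attrs
#             }
#
#         def __getattr__(self, attr):
#             if attr not in self._name_to_module:
#                 raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
#             submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
#             value = getattr(submodule, attr)
#             setattr(self, attr, value)  # cache so __getattr__ is not hit again
#             return value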
def longest_common_subsequence(x: str, y: str):
    """
    Finds the length of the longest common subsequence of two strings using
    dynamic programming, and reconstructs one such subsequence.
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
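# For comparison, an equivalent top-down (memoized) formulation of the same
# recurrence; this variant is not in the original file and returns only the
# length, not the subsequence itself.
def lcs_length(x: str, y: str) -> int:
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def rec(i: int, j: int) -> int:
        # length of the LCS of x[:i] and y[:j]
        if i == 0 or j == 0:
            return 0
        if x[i - 1] == y[j - 1]:
            return rec(i - 1, j - 1) + 1
        return max(rec(i - 1, j), rec(i, j - 1))

    return rec(len(x), len(y))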
import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


_CITATION = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"

_DESCRIPTION = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"

_KWARGS_DESCRIPTION = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, removes punctuation before scoring. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )