Dataset schema (one record = five fields, shown in this order below):

    code                     string  (length 87 - 55.2k)
    code_codestyle           int64   (0 - 349)
    style_context            string  (length 135 - 49.1k)
    style_context_codestyle  int64   (0 - 349)
    label                    int64   (0 - 1)
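As a minimal sketch of how a dump with this schema could be loaded and inspected, assuming the data is published through the `datasets` library (the dataset ID below is a placeholder, not the real repository name):

    from datasets import load_dataset

    # Hypothetical dataset ID -- substitute the actual repository name.
    ds = load_dataset("some-org/code-style-pairs", split="train")

    row = ds[0]
    # The two integer style IDs and the binary label for the first record.
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    # First 200 characters of the (flattened) source file in the code field.
    print(row["code"][:200])

The records below are shown field by field, in the column order above.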
code:

import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator and run training
        estimator = self.create_estimator()
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = Session().describe_training_job(estimator.latest_training_job.name).get(
            "TrainingTimeInSeconds", 999999
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
code_codestyle: 8
style_context:

from __future__ import annotations


def encode(plain: str) -> list[int]:
    # Map each lowercase letter to its position in the alphabet (a = 1).
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    # Inverse of encode: map positions back to lowercase letters.
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
style_context_codestyle: 8
label: 1
code:

import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
code_codestyle: 8
style_context:

import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    if x == 0:  # 0 raised to any number is 0
        return 0
    if y == 0:
        return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Main function
    # Read two base/power pairs from input and typecast them to int using map.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
style_context_codestyle: 8
label: 1
code:

import warnings
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to"
                " duplicated eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
code_codestyle: 8
style_context:

import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
style_context_codestyle: 8
label: 1
code:

import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
code_codestyle: 8
style_context:

from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    # Incremental sieve of Eratosthenes: yields primes indefinitely.
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # prime is composite: advance its smallest factor to the next multiple
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
style_context_codestyle: 8
label: 1
code:

from __future__ import annotations

import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module

import numpy as np

from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTMAEForPreTraining, TFViTMAEModel

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_inputs, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_inputs)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_inputs, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
code_codestyle: 8
style_context:

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 8
label: 1
code:

from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 8
style_context:

# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
style_context_codestyle: 8
label: 1
code:

from __future__ import annotations

import unittest

import numpy as np

from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


if is_tf_available():
    import tensorflow as tf

    from transformers import TFBlipTextModel
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST


class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
code_codestyle: 8
style_context:

from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot holds a deque so multiple values can share one bucket.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
style_context_codestyle: 8
label: 1
code:

from argparse import ArgumentParser, Namespace

from ..utils import logging
from . import BaseTransformersCLICommand


def convert_command_factory(args: Namespace):
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
8
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
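# Hedged usage sketch for the solver above: 0 marks an open cell, 1 a wall,
# and the path runs from the top-left to the bottom-right corner. The grid
# below is an illustrative example, not taken from the original source.
example_maze = [
    [0, 1, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 0, 1, 0, 0],
    [1, 0, 0, 1, 0],
]
# solve_maze(example_maze) prints the 0/1 solution matrix (1s along the
# discovered path) and returns True; an unsolvable grid prints
# "No solution exists!" and returns False.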
8
1
from ..utils import DummyObject, requires_backends class snake_case_ ( metaclass=__A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = ["flax", "transformers"] def __init__( self : Optional[int] , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) ->Any: requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def snake_case__( cls : List[str] , *_UpperCamelCase : int , **_UpperCamelCase : str ) ->Any: requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def snake_case__( cls : Union[str, Any] , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : Any ) ->Dict: requires_backends(cls , ['''flax''', '''transformers'''] ) class snake_case_ ( metaclass=__A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = ["flax", "transformers"] def __init__( self : Any , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : Optional[int] ) ->Dict: requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def snake_case__( cls : str , *_UpperCamelCase : List[Any] , **_UpperCamelCase : int ) ->str: requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def snake_case__( cls : str , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Dict ) ->Union[str, Any]: requires_backends(cls , ['''flax''', '''transformers'''] ) class snake_case_ ( metaclass=__A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = ["flax", "transformers"] def __init__( self : Union[str, Any] , *_UpperCamelCase : Dict , **_UpperCamelCase : Tuple ) ->List[str]: requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def snake_case__( cls : Optional[int] , *_UpperCamelCase : str , **_UpperCamelCase : str ) ->Union[str, Any]: requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def snake_case__( cls : Any , *_UpperCamelCase : Any , **_UpperCamelCase : List[str] ) ->int: requires_backends(cls , ['''flax''', '''transformers'''] ) class snake_case_ ( metaclass=__A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ["flax", "transformers"] def __init__( self : Tuple , *_UpperCamelCase : str , **_UpperCamelCase : int ) ->Any: requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def snake_case__( cls : List[Any] , *_UpperCamelCase : List[str] , **_UpperCamelCase : Union[str, Any] ) ->List[Any]: requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def snake_case__( cls : Union[str, Any] , *_UpperCamelCase : str , **_UpperCamelCase : List[Any] ) ->List[Any]: requires_backends(cls , ['''flax''', '''transformers'''] )
8
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    # Compute pi to `precision` significant digits with the Chudnovsky algorithm.
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each series term adds roughly 14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"""The first {n} digits of pi is: {pi(n)}""")
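# Hedged sanity check for pi() above: the leading digits must match the
# well-known expansion of pi regardless of the requested precision.
assert pi(20).startswith("3.14159265358979")
assert pi(30).startswith("3.14159265358979")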
8
1
import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ): # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match''' snake_case_ = nn.Parameter(SCREAMING_SNAKE_CASE__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match''' snake_case_ = nn.Parameter(SCREAMING_SNAKE_CASE__ ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # set torch weights for 1-to-1 comparison snake_case_ = np.asarray(weights[0] ) snake_case_ = np.asarray(weights[1] ) snake_case_ = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE__ ) , ) set_param( torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE__ ).view(-1 , SCREAMING_SNAKE_CASE__ ).contiguous().transpose(0 , 1 ) , ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # set torch weights for 1-to-1 comparison snake_case_ = np.asarray(weights[0] ) snake_case_ = np.asarray(weights[1] ) snake_case_ = np.asarray(weights[2] ) snake_case_ = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE__ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE__ ) , ) set_param( torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE__ ).view(-1 , SCREAMING_SNAKE_CASE__ ).contiguous().transpose(0 , 1 ) , ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # layernorm 1 snake_case_ = weights[0][0][0] snake_case_ = np.asarray(layer_norm_a[0] ) snake_case_ = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE__ ) , torch.tensor(SCREAMING_SNAKE_CASE__ ) , ) # lsh weights + output snake_case_ = weights[0][1] if len(SCREAMING_SNAKE_CASE__ ) < 4: set_layer_weights_in_torch_lsh(SCREAMING_SNAKE_CASE__ , torch_block.attention , SCREAMING_SNAKE_CASE__ ) else: set_layer_weights_in_torch_local(SCREAMING_SNAKE_CASE__ , torch_block.attention , SCREAMING_SNAKE_CASE__ ) # intermediate weighs snake_case_ = weights[2][0][1][2] # Chunked Feed Forward if len(SCREAMING_SNAKE_CASE__ ) == 4: snake_case_ = intermediate_weights[2] # layernorm 2 snake_case_ = np.asarray(intermediate_weights[0][0] ) snake_case_ = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE__ ) , torch.tensor(SCREAMING_SNAKE_CASE__ ) , ) # intermediate dense snake_case_ = np.asarray(intermediate_weights[1][0] ) snake_case_ = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , 
torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE__ ) , ) # intermediate out snake_case_ = np.asarray(intermediate_weights[4][0] ) snake_case_ = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE__ ) , ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # reformer model snake_case_ = torch_model.reformer # word embeds snake_case_ = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(SCREAMING_SNAKE_CASE__ ) , ) if isinstance(weights[3] , SCREAMING_SNAKE_CASE__ ): snake_case_ = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): snake_case_ = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'''{position_embeddings[emb_idx]} emb does not match''' snake_case_ = nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE__ ) ) snake_case_ = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( SCREAMING_SNAKE_CASE__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): snake_case_ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # output layer norm snake_case_ = np.asarray(weights[7][0] ) snake_case_ = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE__ ) , torch.tensor(SCREAMING_SNAKE_CASE__ ) , ) # output embeddings snake_case_ = np.asarray(weights[9][0] ) snake_case_ = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE__ ) , ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # Initialise PyTorch model snake_case_ = ReformerConfig.from_json_file(SCREAMING_SNAKE_CASE__ ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case_ = ReformerModelWithLMHead(SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , '''rb''' ) as f: snake_case_ = pickle.load(SCREAMING_SNAKE_CASE__ )['''weights'''] set_model_weights_in_torch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , config.hidden_size ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained Reformer model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase_ = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
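# Hedged example invocation of the conversion script above; the script file
# name and all paths are hypothetical placeholders, while the flags mirror
# the argparse definitions in the __main__ block:
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path /tmp/reformer_weights.pkl \
#       --config_file /tmp/reformer_config.json \
#       --pytorch_dump_path /tmp/pytorch_model.bin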
8
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class snake_case_ ( __A ): '''simple docstring''' def __init__( self : int , _UpperCamelCase : pyspark.sql.DataFrame , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : bool = True , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : str = None , _UpperCamelCase : bool = True , _UpperCamelCase : str = "arrow" , **_UpperCamelCase : Tuple , ) ->str: super().__init__( split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , **_UpperCamelCase , ) snake_case_ = load_from_cache_file snake_case_ = file_format snake_case_ = Spark( df=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , working_dir=_UpperCamelCase , **_UpperCamelCase , ) def snake_case__( self : int ) ->Tuple: if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) snake_case_ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=_UpperCamelCase , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
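# Hedged usage sketch for the reader above, assuming it is exposed as
# SparkDatasetReader (its upstream name); the local SparkSession and the toy
# two-row DataFrame are illustrative only.
def example_spark_read():
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[2]").getOrCreate()
    df = spark.createDataFrame([("hello",), ("world",)], ["text"])
    # read() materializes the DataFrame into a datasets Dataset backed by
    # Arrow files in cache_dir (or a streaming dataset when streaming=True).
    return SparkDatasetReader(df, cache_dir="/tmp/hf_cache").read()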
8
1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class snake_case_ ( unittest.TestCase ): '''simple docstring''' @slow def snake_case__( self : Optional[int] ) ->int: snake_case_ = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) snake_case_ = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house snake_case_ = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim snake_case_ = torch.tensor( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case_ = model(_UpperCamelCase )['''last_hidden_state'''].detach() self.assertEqual(output.shape , _UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1e-3 ) ) @slow def snake_case__( self : Union[str, Any] ) ->Tuple: snake_case_ = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' ) snake_case_ = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house snake_case_ = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim snake_case_ = torch.tensor( [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case_ = model(_UpperCamelCase )['''last_hidden_state'''].detach() self.assertEqual(output.shape , _UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1e-3 ) )
8
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable lowerCAmelCase_ = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''DPTFeatureExtractor'''] lowerCAmelCase_ = ['''DPTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DPTForDepthEstimation''', '''DPTForSemanticSegmentation''', '''DPTModel''', '''DPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
8
1
import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class snake_case_ ( __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = CpmAntTokenizer SCREAMING_SNAKE_CASE : Union[str, Any] = False def snake_case__( self : Union[str, Any] ) ->Optional[int]: super().setUp() snake_case_ = [ '''<d>''', '''</d>''', '''<s>''', '''</s>''', '''</_>''', '''<unk>''', '''<pad>''', '''</n>''', '''我''', '''是''', '''C''', '''P''', '''M''', '''A''', '''n''', '''t''', ] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) @tooslow def snake_case__( self : List[Any] ) ->List[str]: snake_case_ = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' ) snake_case_ = '''今天天气真好!''' snake_case_ = ['''今天''', '''天气''', '''真''', '''好''', '''!'''] snake_case_ = tokenizer.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) snake_case_ = '''今天天气真好!''' snake_case_ = [tokenizer.bos_token] + tokens snake_case_ = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase ) snake_case_ = tokenizer.decode(_UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase )
8
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase_ = { '''vocab_file''': { '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''', }, '''tokenizer_file''': { '''unc-nlp/lxmert-base-uncased''': ( '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase_ = { '''unc-nlp/lxmert-base-uncased''': 5_12, } lowerCAmelCase_ = { '''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True}, } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : Any = LxmertTokenizer def __init__( self : Union[str, Any] , _UpperCamelCase : int=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Dict=True , _UpperCamelCase : Any="[UNK]" , _UpperCamelCase : Tuple="[SEP]" , _UpperCamelCase : List[Any]="[PAD]" , _UpperCamelCase : Union[str, Any]="[CLS]" , _UpperCamelCase : str="[MASK]" , _UpperCamelCase : List[str]=True , _UpperCamelCase : List[str]=None , **_UpperCamelCase : List[str] , ) ->Any: super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , ) snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _UpperCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _UpperCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _UpperCamelCase ) != tokenize_chinese_chars ): snake_case_ = getattr(_UpperCamelCase , normalizer_state.pop('''type''' ) ) snake_case_ = do_lower_case snake_case_ = strip_accents snake_case_ = tokenize_chinese_chars snake_case_ = normalizer_class(**_UpperCamelCase ) snake_case_ = do_lower_case def snake_case__( self : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=None ) ->List[Any]: snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case__( self : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: snake_case_ = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase ) return tuple(_UpperCamelCase )
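# Hedged usage sketch, assuming the class above is exposed as
# LxmertTokenizerFast (its upstream name) and the Hub checkpoint from the
# pretrained maps above is reachable.
def example_lxmert_tokenization():
    tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
    encoded = tokenizer("a photo of a cat", "what animal is this?")
    # input_ids follow [CLS] A [SEP] B [SEP]; token_type_ids mark the second
    # segment with 1s, matching create_token_type_ids_from_sequences above.
    return encoded["input_ids"], encoded["token_type_ids"]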
8
1
import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''', # See all DETR models at https://huggingface.co/models?filter=detr } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = "detr" SCREAMING_SNAKE_CASE : Optional[int] = ["past_key_values"] SCREAMING_SNAKE_CASE : Dict = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : List[Any] , _UpperCamelCase : Tuple=True , _UpperCamelCase : str=None , _UpperCamelCase : Union[str, Any]=3 , _UpperCamelCase : Optional[Any]=1_0_0 , _UpperCamelCase : Any=6 , _UpperCamelCase : Tuple=2_0_4_8 , _UpperCamelCase : Any=8 , _UpperCamelCase : List[Any]=6 , _UpperCamelCase : Dict=2_0_4_8 , _UpperCamelCase : Tuple=8 , _UpperCamelCase : List[str]=0.0 , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : str=True , _UpperCamelCase : str="relu" , _UpperCamelCase : Optional[int]=2_5_6 , _UpperCamelCase : str=0.1 , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Any=0.0 , _UpperCamelCase : str=0.02 , _UpperCamelCase : Optional[Any]=1.0 , _UpperCamelCase : int=False , _UpperCamelCase : str="sine" , _UpperCamelCase : Union[str, Any]="resnet50" , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : int=False , _UpperCamelCase : List[Any]=1 , _UpperCamelCase : Optional[int]=5 , _UpperCamelCase : Optional[Any]=2 , _UpperCamelCase : str=1 , _UpperCamelCase : str=1 , _UpperCamelCase : Optional[int]=5 , _UpperCamelCase : Tuple=2 , _UpperCamelCase : str=0.1 , **_UpperCamelCase : Any , ) ->Optional[int]: if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) snake_case_ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_UpperCamelCase , _UpperCamelCase ): snake_case_ = backbone_config.get('''model_type''' ) snake_case_ = CONFIG_MAPPING[backbone_model_type] snake_case_ = config_class.from_dict(_UpperCamelCase ) # set timm attributes to None snake_case_, snake_case_, snake_case_ = None, None, None snake_case_ = use_timm_backbone snake_case_ = backbone_config snake_case_ = num_channels snake_case_ = num_queries snake_case_ = d_model snake_case_ = encoder_ffn_dim snake_case_ = encoder_layers snake_case_ = encoder_attention_heads snake_case_ = decoder_ffn_dim snake_case_ = decoder_layers snake_case_ = decoder_attention_heads snake_case_ = dropout snake_case_ = attention_dropout snake_case_ = activation_dropout snake_case_ = activation_function snake_case_ = init_std snake_case_ = init_xavier_std snake_case_ = encoder_layerdrop snake_case_ = decoder_layerdrop snake_case_ = encoder_layers snake_case_ = auxiliary_loss snake_case_ = position_embedding_type snake_case_ = backbone snake_case_ = use_pretrained_backbone snake_case_ = dilation # Hungarian matcher snake_case_ = class_cost snake_case_ = bbox_cost snake_case_ = giou_cost # Loss coefficients snake_case_ = mask_loss_coefficient snake_case_ = dice_loss_coefficient snake_case_ = bbox_loss_coefficient snake_case_ = giou_loss_coefficient snake_case_ = eos_coefficient super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase ) @property def snake_case__( self : str ) ->int: return self.encoder_attention_heads @property def snake_case__( self : List[str] ) ->int: return self.d_model @classmethod def snake_case__( cls : List[str] , _UpperCamelCase : PretrainedConfig , **_UpperCamelCase : Union[str, Any] ) ->Optional[int]: return cls(backbone_config=_UpperCamelCase , **_UpperCamelCase ) def snake_case__( self : Any ) ->Dict[str, any]: snake_case_ = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: snake_case_ = self.backbone_config.to_dict() snake_case_ = self.__class__.model_type return output class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = version.parse("1.11" ) @property def snake_case__( self : List[Any] ) ->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def snake_case__( self : Optional[Any] ) ->float: return 1e-5 @property def snake_case__( self : List[str] ) ->int: return 1_2
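# Hedged sketch: instantiating a scaled-down configuration from the class
# above, assuming it is exposed as DetrConfig (its upstream name). The small
# sizes are illustrative, not recommended settings.
def example_detr_config():
    config = DetrConfig(num_queries=25, d_model=128, encoder_layers=2, decoder_layers=2)
    # attribute_map aliases hidden_size to d_model:
    assert config.hidden_size == 128
    return config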
8
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"""{solution() = }""")
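# Hedged worked example for the two helpers above, using small known values
# (13 is the 6th prime, 97 the 25th):
assert is_prime(97) and not is_prime(1)
assert solution(6) == 13
assert solution(25) == 97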
8
1
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
            continue
        print(f"""\nSearching Open Library for ISBN: {isbn}...\n""")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"""{key}: {value}""" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"""Sorry, there are no results for ISBN: {isbn}.""")
8
from sklearn.metrics import mean_squared_error import datasets lowerCAmelCase_ = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' lowerCAmelCase_ = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' lowerCAmelCase_ = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. 
])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case_ ( datasets.Metric ): '''simple docstring''' def snake_case__( self : Optional[int] ) ->List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def snake_case__( self : List[Any] ) ->Optional[int]: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def snake_case__( self : int , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any=None , _UpperCamelCase : Optional[int]="uniform_average" , _UpperCamelCase : Tuple=True ) ->Tuple: snake_case_ = mean_squared_error( _UpperCamelCase , _UpperCamelCase , sample_weight=_UpperCamelCase , multioutput=_UpperCamelCase , squared=_UpperCamelCase ) return {"mse": mse}
8
1
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
8
def permute(nums: list[int]) -> list[list[int]]:
    # Build all permutations by rotating the head element to the tail.
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    # Build all permutations by in-place swapping and backtracking.
    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
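# Hedged cross-check of the two routines above: both must enumerate all
# 3! = 6 orderings of [1, 2, 3], though not necessarily in the same order.
assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))
assert len(permute2([1, 2, 3])) == 6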
8
1
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp: str, tp: str) -> None:
    print("moving disk from", fp, "to", tp)


def main() -> None:
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
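# Hedged worked example: for a tower of height 2 the solver above prints the
# classic three moves (small disk to the spare pole, large disk to the
# target, small disk restacked on top):
#
#   move_tower(2, "A", "B", "C")
#   -> moving disk from A to C
#   -> moving disk from A to B
#   -> moving disk from C to B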
8
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
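# Hedged note on the _LazyModule wiring above: importing the package stays
# cheap because heavy submodules load only on first attribute access, e.g.
#
#   import transformers.models.xglm as xglm  # fast; nothing heavy imported yet
#   xglm.XGLMConfig                          # first access triggers the real import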
8
1
import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () lowerCAmelCase_ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). lowerCAmelCase_ = [0, 25, 50] lowerCAmelCase_ = [25, 50, 75] lowerCAmelCase_ = fuzz.membership.trimf(X, abca) lowerCAmelCase_ = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. lowerCAmelCase_ = np.ones(75) lowerCAmelCase_ = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) lowerCAmelCase_ = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) lowerCAmelCase_ = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) lowerCAmelCase_ = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) lowerCAmelCase_ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] lowerCAmelCase_ = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) lowerCAmelCase_ = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] lowerCAmelCase_ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] lowerCAmelCase_ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('''Young''') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('''Middle aged''') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('''union''') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('''intersection''') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('''complement_a''') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('''difference a/b''') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('''alg_sum''') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('''alg_product''') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('''bdd_sum''') plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title('''bdd_difference''') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
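# Hedged pointwise check of the set operations above, evaluated at a single
# pair of membership values rather than over the whole universe X:
mu_a, mu_b = 0.6, 0.3
assert max(mu_a, mu_b) == 0.6  # union
assert min(mu_a, mu_b) == 0.3  # intersection
assert abs((mu_a + mu_b - mu_a * mu_b) - 0.72) < 1e-9  # algebraic sum
assert abs((mu_a * mu_b) - 0.18) < 1e-9  # algebraic product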
8
from ..utils import DummyObject, requires_backends class snake_case_ ( metaclass=__A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = ["note_seq"] def __init__( self : Optional[int] , *_UpperCamelCase : str , **_UpperCamelCase : Optional[int] ) ->Any: requires_backends(self , ['''note_seq'''] ) @classmethod def snake_case__( cls : int , *_UpperCamelCase : Any , **_UpperCamelCase : List[Any] ) ->int: requires_backends(cls , ['''note_seq'''] ) @classmethod def snake_case__( cls : Dict , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Union[str, Any] ) ->List[str]: requires_backends(cls , ['''note_seq'''] )
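# Hedged minimal re-implementation of the idea behind the dummies above:
# defer the ImportError from import time to first use. This sketches the
# pattern only; it is not the library's actual DummyObject code.
class _LazyBackendError(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the note_seq backend.")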
8
1
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowerCAmelCase_ = logging.getLogger(__name__) def __SCREAMING_SNAKE_CASE (): snake_case_ = argparse.ArgumentParser( description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' ) parser.add_argument( '''--dataset_name''' , type=SCREAMING_SNAKE_CASE__ , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , ) parser.add_argument( '''--dataset_config''' , type=SCREAMING_SNAKE_CASE__ , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' ) parser.add_argument( '''--tokenizer_name_or_path''' , type=SCREAMING_SNAKE_CASE__ , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , ) parser.add_argument( '''--shard_size''' , type=SCREAMING_SNAKE_CASE__ , default=1000 , help='''Number of entries to go in a single shard.''' , ) parser.add_argument('''--split''' , type=SCREAMING_SNAKE_CASE__ , default='''train''' , choices=['''train''', '''test''', '''validation'''] ) parser.add_argument( '''--limit''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='''Limit the number of shards (used for debugging).''' , ) parser.add_argument( '''--max_length''' , type=SCREAMING_SNAKE_CASE__ , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum''' ''' sequence length that is a multiple of 8.''' , ) parser.add_argument( '''--output_dir''' , default='''tf-tpu''' , type=SCREAMING_SNAKE_CASE__ , help='''Output directory where the TFRecord shards will be saved. If the''' ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord''' ''' shards will be directly saved to a Google Cloud Storage bucket.''' , ) snake_case_ = parser.parse_args() return args def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): def fn(SCREAMING_SNAKE_CASE__ ): return tokenizer(examples['''text'''] ) return fn def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = [] for i in range(len(tokenized_data['''input_ids'''] ) ): snake_case_ = { '''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ), '''attention_mask''': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ), } snake_case_ = tf.train.Features(feature=SCREAMING_SNAKE_CASE__ ) snake_case_ = tf.train.Example(features=SCREAMING_SNAKE_CASE__ ) snake_case_ = example.SerializeToString() records.append(SCREAMING_SNAKE_CASE__ ) return records def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: snake_case_ = min(len(SCREAMING_SNAKE_CASE__ ) , args.limit ) snake_case_ = dataset.select(range(SCREAMING_SNAKE_CASE__ ) ) print(F'''Limiting the dataset to {args.limit} entries.''' ) snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. 
if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) snake_case_ = os.path.join(args.output_dir , args.split ) if not os.path.exists(SCREAMING_SNAKE_CASE__ ): os.makedirs(SCREAMING_SNAKE_CASE__ ) else: snake_case_ = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. snake_case_ = tokenize_function(SCREAMING_SNAKE_CASE__ ) snake_case_ = dataset.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=4 , remove_columns=['''text'''] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(SCREAMING_SNAKE_CASE__ ): # Concatenate all texts. snake_case_ = {k: sum(examples[k] , [] ) for k in examples.keys()} snake_case_ = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 snake_case_ = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. snake_case_ = { k: [t[i : i + args.max_length] for i in range(0 , SCREAMING_SNAKE_CASE__ , args.max_length )] for k, t in concatenated_examples.items() } return result snake_case_ = dataset_tokenized.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=1000 , num_proc=4 ) snake_case_ = 0 snake_case_ = 0 for shard in range(0 , len(SCREAMING_SNAKE_CASE__ ) , args.shard_size ): snake_case_ = grouped_dataset[shard : shard + args.shard_size] snake_case_ = len(dataset_snapshot['''input_ids'''] ) snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' ) snake_case_ = get_serialized_examples(SCREAMING_SNAKE_CASE__ ) with tf.io.TFRecordWriter(SCREAMING_SNAKE_CASE__ ) as out_file: for i in range(len(SCREAMING_SNAKE_CASE__ ) ): snake_case_ = serialized_examples[i] out_file.write(SCREAMING_SNAKE_CASE__ ) print('''Wrote file {} containing {} records'''.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) shard_count += 1 total_records += records_containing with open(F'''split-{args.split}-records-count.txt''' , '''w''' ) as f: print(F'''Total {args.split} records: {total_records}''' , file=SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowerCAmelCase_ = parse_args() main(args)
8
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = "vit_msn" def __init__( self : Dict , _UpperCamelCase : Optional[int]=7_6_8 , _UpperCamelCase : Optional[Any]=1_2 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : str=3_0_7_2 , _UpperCamelCase : Tuple="gelu" , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : List[str]=0.02 , _UpperCamelCase : List[Any]=1e-06 , _UpperCamelCase : Any=2_2_4 , _UpperCamelCase : Optional[Any]=1_6 , _UpperCamelCase : Any=3 , _UpperCamelCase : str=True , **_UpperCamelCase : Any , ) ->int: super().__init__(**_UpperCamelCase ) snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = qkv_bias
8
1
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    # Valid numbers start with 0, 94, +94 or 0094, followed by a mobile
    # prefix 70-78 (excluding 73), an optional separator, and seven digits.
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"
        r"7(0|1|2|4|5|6|7|8)"
        r"(-| |)"
        r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
8
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    # Resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C)).
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
8
1
from sklearn.metrics import fa_score import datasets lowerCAmelCase_ = ''' The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) ''' lowerCAmelCase_ = ''' Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`. - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives. - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {\'f1\': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results[\'f1\'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results[\'f1\'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. 
>>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro") >>> print(round(results[\'f1\'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {\'f1\': array([0.8, 0. , 0. ])} ''' lowerCAmelCase_ = ''' @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case_ ( datasets.Metric ): '''simple docstring''' def snake_case__( self : Tuple ) ->Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ), '''references''': datasets.Sequence(datasets.Value('''int32''' ) ), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , ) def snake_case__( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Optional[int]=1 , _UpperCamelCase : List[str]="binary" , _UpperCamelCase : Tuple=None ) ->Optional[Any]: snake_case_ = fa_score( _UpperCamelCase , _UpperCamelCase , labels=_UpperCamelCase , pos_label=_UpperCamelCase , average=_UpperCamelCase , sample_weight=_UpperCamelCase ) return {"f1": float(_UpperCamelCase ) if score.size == 1 else score}
8
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): return x + 2 class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''x = 3''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3} ) snake_case_ = '''x = y''' snake_case_ = {'''y''': 5} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 5, '''y''': 5} ) def snake_case__( self : Dict ) ->Optional[int]: snake_case_ = '''y = add_two(x)''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) # Won't work without the tool with CaptureStdout() as out: snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result is None assert "tried to execute add_two" in out.out def snake_case__( self : Union[str, Any] ) ->Dict: snake_case_ = '''x = 3''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3} ) def snake_case__( self : Optional[int] ) ->Optional[int]: snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def snake_case__( self : Dict ) ->str: snake_case_ = '''x = 3\ny = 5''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) def snake_case__( self : str ) ->Tuple: snake_case_ = '''text = f\'This is x: {x}.\'''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''text''': '''This is x: 3.'''} ) def snake_case__( self : Optional[Any] ) ->List[str]: snake_case_ = '''if x <= 3:\n y = 2\nelse:\n y = 5''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 2} ) snake_case_ = {'''x''': 8} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 8, '''y''': 5} ) def snake_case__( self : str ) ->str: snake_case_ = '''test_list = [x, add_two(x)]''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , [3, 5] ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} ) def snake_case__( self : Any ) ->List[Any]: snake_case_ = '''y = x''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 3} ) def snake_case__( self : Optional[int] ) ->Dict: snake_case_ = '''test_list = [x, add_two(x)]\ntest_list[1]''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} ) snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''x = 0\nfor i in range(3):\n x = i''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {'''range''': range} , state=_UpperCamelCase ) assert result == 2 self.assertDictEqual(_UpperCamelCase , {'''x''': 2, '''i''': 2} )
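# Usage sketch for the interpreter exercised above, restricted to behaviors
# the tests demonstrate (assignment, tool calls, state updates); the lambda
# stands in for the `add_two` tool the tests use.
from transformers.tools.python_interpreter import evaluate

state = {"x": 3}
result = evaluate("y = add_two(x)", {"add_two": lambda v: v + 2}, state=state)
print(result, state)  # 5 {'x': 3, 'y': 5}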
from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''', } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = "efficientnet" def __init__( self : Tuple , _UpperCamelCase : int = 3 , _UpperCamelCase : int = 6_0_0 , _UpperCamelCase : float = 2.0 , _UpperCamelCase : float = 3.1 , _UpperCamelCase : int = 8 , _UpperCamelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , _UpperCamelCase : List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , _UpperCamelCase : List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , _UpperCamelCase : List[int] = [] , _UpperCamelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , _UpperCamelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , _UpperCamelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , _UpperCamelCase : float = 0.25 , _UpperCamelCase : str = "swish" , _UpperCamelCase : int = 2_5_6_0 , _UpperCamelCase : str = "mean" , _UpperCamelCase : float = 0.02 , _UpperCamelCase : float = 0.001 , _UpperCamelCase : float = 0.99 , _UpperCamelCase : float = 0.5 , _UpperCamelCase : float = 0.2 , **_UpperCamelCase : Optional[Any] , ) ->str: super().__init__(**_UpperCamelCase ) snake_case_ = num_channels snake_case_ = image_size snake_case_ = width_coefficient snake_case_ = depth_coefficient snake_case_ = depth_divisor snake_case_ = kernel_sizes snake_case_ = in_channels snake_case_ = out_channels snake_case_ = depthwise_padding snake_case_ = strides snake_case_ = num_block_repeats snake_case_ = expand_ratios snake_case_ = squeeze_expansion_ratio snake_case_ = hidden_act snake_case_ = hidden_dim snake_case_ = pooling_type snake_case_ = initializer_range snake_case_ = batch_norm_eps snake_case_ = batch_norm_momentum snake_case_ = dropout_rate snake_case_ = drop_connect_rate snake_case_ = sum(_UpperCamelCase ) * 4 class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = version.parse("1.11" ) @property def snake_case__( self : List[str] ) ->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def snake_case__( self : int ) ->float: return 1e-5
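# Usage sketch: instantiating the configuration above. Upstream the first
# class is `EfficientNetConfig`; that name is an assumption here, since this
# file's class names are obfuscated.
from transformers import EfficientNetConfig

config = EfficientNetConfig()  # defaults mirror the __init__ signature above
print(config.image_size)       # 600
print(config.hidden_dim)       # 2560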
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Tuple ) ->List[Any]: return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_UpperCamelCase ) for s in shape] )}.npy''' def snake_case__( self : Any ) ->List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() def snake_case__( self : int , _UpperCamelCase : Union[str, Any]=0 , _UpperCamelCase : int=(4, 4, 6_4, 6_4) , _UpperCamelCase : Optional[int]=False ) ->Tuple: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase , _UpperCamelCase ) ) , dtype=_UpperCamelCase ) return image def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Optional[int]="CompVis/stable-diffusion-v1-4" ) ->Optional[Any]: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = '''bf16''' if fpaa else None snake_case_, snake_case_ = FlaxUNetaDConditionModel.from_pretrained( _UpperCamelCase , subfolder='''unet''' , dtype=_UpperCamelCase , revision=_UpperCamelCase ) return model, params def snake_case__( self : Dict , _UpperCamelCase : List[Any]=0 , _UpperCamelCase : Tuple=(4, 7_7, 7_6_8) , _UpperCamelCase : List[Any]=False ) ->int: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase , _UpperCamelCase ) ) , dtype=_UpperCamelCase ) return hidden_states @parameterized.expand( [ # fmt: off [8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) ->Union[str, Any]: snake_case_, snake_case_ = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=_UpperCamelCase ) snake_case_ = self.get_latents(_UpperCamelCase , fpaa=_UpperCamelCase ) snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , fpaa=_UpperCamelCase ) snake_case_ = model.apply( {'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample assert sample.shape == latents.shape snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def 
snake_case__( self : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str ) ->Dict: snake_case_, snake_case_ = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=_UpperCamelCase ) snake_case_ = self.get_latents(_UpperCamelCase , shape=(4, 4, 9_6, 9_6) , fpaa=_UpperCamelCase ) snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , shape=(4, 7_7, 1_0_2_4) , fpaa=_UpperCamelCase ) snake_case_ = model.apply( {'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample assert sample.shape == latents.shape snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 )
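# Usage sketch mirroring `get_unet_model` in the tests above: loading the
# Flax UNet weights (requires jax/flax and network access; the checkpoint
# name is taken from the tests themselves).
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

model, params = FlaxUNet2DConditionModel.from_pretrained(
    "CompVis/stable-diffusion-v1-4", subfolder="unet", dtype=jnp.float32
)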
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


# The names below (release_memory, should_reduce_batch_size,
# find_executable_batch_size) restore the obfuscated identifiers; they match
# the helpers in `accelerate.utils.memory`.
def release_memory(*objects):
    # Drop references to every object, then flush the accelerator cache.
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    # Heuristic: treat these runtime errors as out-of-memory conditions.
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    # Decorator that retries `function` with a halved batch size on OOM.
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
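# Usage sketch for `find_executable_batch_size` above: the wrapped function
# must take the batch size as its first argument; on OOM-like errors it is
# retried with the batch size halved until a run succeeds or zero is reached.
@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"trying batch_size={batch_size}")
    # ... build dataloaders / run a training step at this batch size ...

train()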
import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class snake_case_ ( __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = AudioLDMPipeline SCREAMING_SNAKE_CASE : str = TEXT_TO_AUDIO_PARAMS SCREAMING_SNAKE_CASE : Optional[int] = TEXT_TO_AUDIO_BATCH_PARAMS SCREAMING_SNAKE_CASE : Dict = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def snake_case__( self : Dict ) ->str: torch.manual_seed(0 ) snake_case_ = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(3_2, 6_4) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=_UpperCamelCase , ) snake_case_ = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_UpperCamelCase , set_alpha_to_one=_UpperCamelCase , ) torch.manual_seed(0 ) snake_case_ = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case_ = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , ) snake_case_ = ClapTextModelWithProjection(_UpperCamelCase ) snake_case_ = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=7_7 ) snake_case_ = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_UpperCamelCase , ) snake_case_ = SpeechTaHifiGan(_UpperCamelCase ) snake_case_ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''vocoder''': vocoder, } return components def snake_case__( self : str , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any]=0 ) ->str: if str(_UpperCamelCase ).startswith('''mps''' ): snake_case_ = torch.manual_seed(_UpperCamelCase ) else: snake_case_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) snake_case_ = { '''prompt''': '''A hammer hitting a wooden surface''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, } return inputs def snake_case__( self : int ) ->Optional[int]: snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator 
snake_case_ = self.get_dummy_components() snake_case_ = AudioLDMPipeline(**_UpperCamelCase ) snake_case_ = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ = audioldm_pipe(**_UpperCamelCase ) snake_case_ = output.audios[0] assert audio.ndim == 1 assert len(_UpperCamelCase ) == 2_5_6 snake_case_ = audio[:1_0] snake_case_ = np.array( [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def snake_case__( self : List[str] ) ->Any: snake_case_ = self.get_dummy_components() snake_case_ = AudioLDMPipeline(**_UpperCamelCase ) snake_case_ = audioldm_pipe.to(_UpperCamelCase ) snake_case_ = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ = 3 * [inputs['''prompt''']] # forward snake_case_ = audioldm_pipe(**_UpperCamelCase ) snake_case_ = output.audios[0] snake_case_ = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ = 3 * [inputs.pop('''prompt''' )] snake_case_ = audioldm_pipe.tokenizer( _UpperCamelCase , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_UpperCamelCase , return_tensors='''pt''' , ) snake_case_ = text_inputs['''input_ids'''].to(_UpperCamelCase ) snake_case_ = audioldm_pipe.text_encoder( _UpperCamelCase , ) snake_case_ = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state snake_case_ = F.normalize(_UpperCamelCase , dim=-1 ) snake_case_ = prompt_embeds # forward snake_case_ = audioldm_pipe(**_UpperCamelCase ) snake_case_ = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def snake_case__( self : Any ) ->Union[str, Any]: snake_case_ = self.get_dummy_components() snake_case_ = AudioLDMPipeline(**_UpperCamelCase ) snake_case_ = audioldm_pipe.to(_UpperCamelCase ) snake_case_ = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ = 3 * ['''this is a negative prompt'''] snake_case_ = negative_prompt snake_case_ = 3 * [inputs['''prompt''']] # forward snake_case_ = audioldm_pipe(**_UpperCamelCase ) snake_case_ = output.audios[0] snake_case_ = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ = 3 * [inputs.pop('''prompt''' )] snake_case_ = [] for p in [prompt, negative_prompt]: snake_case_ = audioldm_pipe.tokenizer( _UpperCamelCase , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_UpperCamelCase , return_tensors='''pt''' , ) snake_case_ = text_inputs['''input_ids'''].to(_UpperCamelCase ) snake_case_ = audioldm_pipe.text_encoder( _UpperCamelCase , ) snake_case_ = text_embeds.text_embeds # additional L_2 normalization over each hidden-state snake_case_ = F.normalize(_UpperCamelCase , dim=-1 ) embeds.append(_UpperCamelCase ) snake_case_, snake_case_ = embeds # forward snake_case_ = audioldm_pipe(**_UpperCamelCase ) snake_case_ = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def snake_case__( self : List[Any] ) ->Optional[int]: snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = PNDMScheduler(skip_prk_steps=_UpperCamelCase ) snake_case_ = AudioLDMPipeline(**_UpperCamelCase ) snake_case_ = audioldm_pipe.to(_UpperCamelCase ) 
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ = '''egg cracking''' snake_case_ = audioldm_pipe(**_UpperCamelCase , negative_prompt=_UpperCamelCase ) snake_case_ = output.audios[0] assert audio.ndim == 1 assert len(_UpperCamelCase ) == 2_5_6 snake_case_ = audio[:1_0] snake_case_ = np.array( [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def snake_case__( self : List[Any] ) ->Union[str, Any]: snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = PNDMScheduler(skip_prk_steps=_UpperCamelCase ) snake_case_ = AudioLDMPipeline(**_UpperCamelCase ) snake_case_ = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ = '''A hammer hitting a wooden surface''' # test num_waveforms_per_prompt=1 (default) snake_case_ = audioldm_pipe(_UpperCamelCase , num_inference_steps=2 ).audios assert audios.shape == (1, 2_5_6) # test num_waveforms_per_prompt=1 (default) for batch of prompts snake_case_ = 2 snake_case_ = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 2_5_6) # test num_waveforms_per_prompt for single prompt snake_case_ = 2 snake_case_ = audioldm_pipe(_UpperCamelCase , num_inference_steps=2 , num_waveforms_per_prompt=_UpperCamelCase ).audios assert audios.shape == (num_waveforms_per_prompt, 2_5_6) # test num_waveforms_per_prompt for batch of prompts snake_case_ = 2 snake_case_ = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_UpperCamelCase ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6) def snake_case__( self : Optional[int] ) ->List[str]: snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = AudioLDMPipeline(**_UpperCamelCase ) snake_case_ = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ = audioldm_pipe.vocoder.config.sampling_rate snake_case_ = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ = audioldm_pipe(audio_length_in_s=0.016 , **_UpperCamelCase ) snake_case_ = output.audios[0] assert audio.ndim == 1 assert len(_UpperCamelCase ) / vocoder_sampling_rate == 0.016 snake_case_ = audioldm_pipe(audio_length_in_s=0.032 , **_UpperCamelCase ) snake_case_ = output.audios[0] assert audio.ndim == 1 assert len(_UpperCamelCase ) / vocoder_sampling_rate == 0.032 def snake_case__( self : str ) ->List[Any]: snake_case_ = self.get_dummy_components() snake_case_ = AudioLDMPipeline(**_UpperCamelCase ) snake_case_ = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ = ['''hey'''] snake_case_ = audioldm_pipe(_UpperCamelCase , num_inference_steps=1 ) snake_case_ = output.audios.shape assert audio_shape == (1, 2_5_6) snake_case_ = audioldm_pipe.vocoder.config config.model_in_dim *= 2 snake_case_ = SpeechTaHifiGan(_UpperCamelCase ).to(_UpperCamelCase ) snake_case_ = audioldm_pipe(_UpperCamelCase , num_inference_steps=1 ) snake_case_ = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 2_5_6) def snake_case__( self : Optional[Any] ) ->int: 
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_UpperCamelCase ) def snake_case__( self : int ) ->Optional[int]: self._test_inference_batch_single_identical(test_mean_pixel_difference=_UpperCamelCase ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def snake_case__( self : Optional[Any] ) ->int: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_UpperCamelCase ) @slow class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Any ) ->str: super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case__( self : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str="cpu" , _UpperCamelCase : List[Any]=torch.floataa , _UpperCamelCase : Any=0 ) ->List[str]: snake_case_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) snake_case_ = np.random.RandomState(_UpperCamelCase ).standard_normal((1, 8, 1_2_8, 1_6) ) snake_case_ = torch.from_numpy(_UpperCamelCase ).to(device=_UpperCamelCase , dtype=_UpperCamelCase ) snake_case_ = { '''prompt''': '''A hammer hitting a wooden surface''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 2.5, } return inputs def snake_case__( self : Any ) ->Union[str, Any]: snake_case_ = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' ) snake_case_ = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ = self.get_inputs(_UpperCamelCase ) snake_case_ = 2_5 snake_case_ = audioldm_pipe(**_UpperCamelCase ).audios[0] assert audio.ndim == 1 assert len(_UpperCamelCase ) == 8_1_9_2_0 snake_case_ = audio[7_7_2_3_0:7_7_2_4_0] snake_case_ = np.array( [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] ) snake_case_ = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def snake_case__( self : List[Any] ) ->str: snake_case_ = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' ) snake_case_ = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) snake_case_ = audioldm_pipe.to(_UpperCamelCase ) audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ = self.get_inputs(_UpperCamelCase ) snake_case_ = audioldm_pipe(**_UpperCamelCase ).audios[0] assert audio.ndim == 1 assert len(_UpperCamelCase ) == 8_1_9_2_0 snake_case_ = audio[2_7_7_8_0:2_7_7_9_0] snake_case_ = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] ) snake_case_ = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
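# Usage sketch mirroring the slow tests above: running the real AudioLDM
# checkpoint (needs network access; a GPU is advisable for speed).
import torch
from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]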
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
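# Quick sanity check for the recursive insertion sort above.
data = [5, 3, 8, 1]
rec_insertion_sort(data, len(data))
assert data == [1, 3, 5, 8]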
import math


def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes
    # two arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
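# Worked example for res() above: comparing 2^10 and 10^3 via logarithms.
# res(2, 10) = 10 * log10(2) ~ 3.0103 > res(10, 3) = 3.0, so 2^10 > 10^3.
print(res(2, 10))   # ~3.0103
print(res(10, 3))   # 3.0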
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = { '''en''': '''Machine learning is great, isn\'t it?''', '''ru''': '''Машинное обучение - это здорово, не так ли?''', '''de''': '''Maschinelles Lernen ist großartig, oder?''', } # BLUE scores as follows: # "pair": [fairseq, transformers] snake_case_ = { '''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''], '''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''], '''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''], '''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''], } snake_case_ = F'''{src_lang}-{tgt_lang}''' snake_case_ = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR\'s WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) ''' os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''README.md''' ) print(F'''Generating {path}''' ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f: f.write(SCREAMING_SNAKE_CASE__ ) # make sure we are under the root of the project lowerCAmelCase_ = Path(__file__).resolve().parent.parent.parent lowerCAmelCase_ = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = model_name.split('''-''') lowerCAmelCase_ = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase_ = { '''vocab_file''': { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''', '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model''' ), } } lowerCAmelCase_ = { '''google/bigbird-roberta-base''': 40_96, '''google/bigbird-roberta-large''': 40_96, '''google/bigbird-base-trivia-itc''': 40_96, } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : List[Any] = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE : List[int] = [] def __init__( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict="<unk>" , _UpperCamelCase : List[str]="<s>" , _UpperCamelCase : Tuple="</s>" , _UpperCamelCase : Any="<pad>" , _UpperCamelCase : Any="[SEP]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) ->None: snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else bos_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else eos_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else unk_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else pad_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else cls_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , sep_token=_UpperCamelCase , mask_token=_UpperCamelCase , cls_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) snake_case_ = vocab_file snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCamelCase ) @property def snake_case__( self : str ) ->List[Any]: return self.sp_model.get_piece_size() def snake_case__( self : int ) ->Union[str, Any]: snake_case_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) ->Any: snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : str , _UpperCamelCase : List[Any] ) ->List[str]: snake_case_ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__( self : Optional[int] , _UpperCamelCase : str ) ->List[str]: return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) def snake_case__( self : str , _UpperCamelCase : List[str] ) ->Tuple: return self.sp_model.piece_to_id(_UpperCamelCase ) def snake_case__( self : Union[str, Any] , _UpperCamelCase : str ) ->List[Any]: snake_case_ = self.sp_model.IdToPiece(_UpperCamelCase ) return token def snake_case__( self : Dict , _UpperCamelCase : Optional[int] ) ->List[str]: snake_case_ = [] snake_case_ = '''''' snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCamelCase ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(_UpperCamelCase ) snake_case_ = False out_string += self.sp_model.decode(_UpperCamelCase ) return out_string.strip() def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : bool = False , _UpperCamelCase : bool = None , _UpperCamelCase : bool = True , **_UpperCamelCase : List[str] , ) ->str: snake_case_ = kwargs.pop('''use_source_tokenizer''' , _UpperCamelCase ) snake_case_ = self.convert_ids_to_tokens(_UpperCamelCase , skip_special_tokens=_UpperCamelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 snake_case_ = [] snake_case_ = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) snake_case_ = [] sub_texts.append(_UpperCamelCase ) else: current_sub_text.append(_UpperCamelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: snake_case_ = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(_UpperCamelCase ) ) else: snake_case_ = ''''''.join(_UpperCamelCase ) snake_case_ = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: snake_case_ = self.clean_up_tokenization(_UpperCamelCase ) return clean_text else: return text def snake_case__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , '''wb''' ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,) def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def snake_case__( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase )) + [1] return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1] def snake_case__( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
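# Usage sketch: upstream this tokenizer is `BigBirdTokenizer` (an assumption
# here, given the obfuscated class name); it loads one of the checkpoints in
# the pretrained map above.
from transformers import BigBirdTokenizer

tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
ids = tok("Hello world").input_ids
print(tok.decode(ids, skip_special_tokens=True))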
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = "Wav2Vec2FeatureExtractor" SCREAMING_SNAKE_CASE : Dict = "AutoTokenizer" def __init__( self : str , _UpperCamelCase : Dict , _UpperCamelCase : str ) ->Dict: super().__init__(_UpperCamelCase , _UpperCamelCase ) snake_case_ = self.feature_extractor snake_case_ = False @classmethod def snake_case__( cls : Union[str, Any] , _UpperCamelCase : str , **_UpperCamelCase : List[str] ) ->Dict: try: return super().from_pretrained(_UpperCamelCase , **_UpperCamelCase ) except OSError: warnings.warn( f'''Loading a tokenizer inside {cls.__name__} from a config that does not''' ''' include a `tokenizer_class` attribute is deprecated and will be ''' '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`''' ''' attribute to either your `config.json` or `tokenizer_config.json` ''' '''file to suppress this warning: ''' , _UpperCamelCase , ) snake_case_ = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase , **_UpperCamelCase ) snake_case_ = WavaVecaCTCTokenizer.from_pretrained(_UpperCamelCase , **_UpperCamelCase ) return cls(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase ) def __call__( self : Dict , *_UpperCamelCase : int , **_UpperCamelCase : Dict ) ->Tuple: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_UpperCamelCase , **_UpperCamelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) snake_case_ = kwargs.pop('''raw_speech''' ) else: snake_case_ = kwargs.pop('''audio''' , _UpperCamelCase ) snake_case_ = kwargs.pop('''sampling_rate''' , _UpperCamelCase ) snake_case_ = kwargs.pop('''text''' , _UpperCamelCase ) if len(_UpperCamelCase ) > 0: snake_case_ = args[0] snake_case_ = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: snake_case_ = self.feature_extractor(_UpperCamelCase , *_UpperCamelCase , sampling_rate=_UpperCamelCase , **_UpperCamelCase ) if text is not None: snake_case_ = self.tokenizer(_UpperCamelCase , **_UpperCamelCase ) if text is None: return inputs elif audio is None: return encodings else: snake_case_ = encodings['''input_ids'''] return inputs def snake_case__( self : Any , *_UpperCamelCase : List[str] , **_UpperCamelCase : Optional[int] ) ->int: # For backward compatibility if self._in_target_context_manager: return self.current_processor.pad(*_UpperCamelCase , **_UpperCamelCase ) snake_case_ = kwargs.pop('''input_features''' , _UpperCamelCase ) snake_case_ = kwargs.pop('''labels''' , _UpperCamelCase ) if len(_UpperCamelCase ) > 0: snake_case_ = args[0] snake_case_ = args[1:] if input_features is not None: snake_case_ = self.feature_extractor.pad(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase ) if labels is not None: snake_case_ = self.tokenizer.pad(_UpperCamelCase , **_UpperCamelCase ) if labels is None: return input_features elif input_features is None: return labels else: snake_case_ = labels['''input_ids'''] return input_features def snake_case__( self : Union[str, Any] , *_UpperCamelCase : str , **_UpperCamelCase : Optional[int] ) ->Optional[int]: return self.tokenizer.batch_decode(*_UpperCamelCase , 
**_UpperCamelCase ) def snake_case__( self : List[str] , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : str ) ->Optional[Any]: return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase ) @contextmanager def snake_case__( self : str ) ->List[Any]: warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) snake_case_ = True snake_case_ = self.tokenizer yield snake_case_ = self.feature_extractor snake_case_ = False
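# Usage sketch: upstream this processor is `Wav2Vec2Processor` (an assumption
# here, given the obfuscated class name); as in __call__ above, it routes
# `audio` to the feature extractor and `text` to the tokenizer.
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
inputs = processor(
    audio=np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="pt"
)
print(inputs.input_values.shape)  # torch.Size([1, 16000])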
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    # Incremental Sieve of Eratosthenes: yields primes indefinitely.
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # Register the prime's square as its first composite
            # (the standard incremental-sieve step).
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
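# Quick sanity check for the incremental sieve above.
gen = sieve()
print([next(gen) for _ in range(5)])  # [2, 3, 5, 7, 11]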
from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 lowerCAmelCase_ = { # 1536-bit 5: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 2048-bit 14: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 3072-bit 15: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 4096-bit 16: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + 
'''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199''' + '''FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 6144-bit 17: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08''' + '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B''' + '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9''' + '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6''' + '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8''' + '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C''' + '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718''' + '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D''' + '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D''' + '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226''' + '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC''' + '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26''' + '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB''' + '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2''' + '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127''' + '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406''' + '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918''' + '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151''' + '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03''' + '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F''' + '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B''' + '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632''' + '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E''' + '''6DCC4024FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 8192-bit 18: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD''' + '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831''' + '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B''' + '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF''' + '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6''' + 
'''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3''' + '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328''' + '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C''' + '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE''' + '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4''' + '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300''' + '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568''' + '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9''' + '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B''' + '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A''' + '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36''' + '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1''' + '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92''' + '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47''' + '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71''' + '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, } class snake_case_ : '''simple docstring''' def __init__( self : List[str] , _UpperCamelCase : int = 1_4 ) ->None: if group not in primes: raise ValueError('''Unsupported Group''' ) snake_case_ = primes[group]['''prime'''] snake_case_ = primes[group]['''generator'''] snake_case_ = int(hexlify(urandom(3_2 ) ) , base=1_6 ) def snake_case__( self : int ) ->str: return hex(self.__private_key )[2:] def snake_case__( self : List[str] ) ->str: snake_case_ = pow(self.generator , self.__private_key , self.prime ) return hex(_UpperCamelCase )[2:] def snake_case__( self : Optional[Any] , _UpperCamelCase : int ) ->bool: # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(_UpperCamelCase , (self.prime - 1) // 2 , self.prime ) == 1 ) def snake_case__( self : List[str] , _UpperCamelCase : str ) ->str: snake_case_ = int(_UpperCamelCase , base=1_6 ) if not self.is_valid_public_key(_UpperCamelCase ): raise ValueError('''Invalid public key''' ) snake_case_ = pow(_UpperCamelCase , self.__private_key , self.prime ) return shaaaa(str(_UpperCamelCase ).encode() ).hexdigest() @staticmethod def snake_case__( _UpperCamelCase : int , _UpperCamelCase : int ) ->bool: # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(_UpperCamelCase , (prime - 1) // 2 , _UpperCamelCase ) == 1 ) @staticmethod def snake_case__( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : int = 1_4 ) ->str: snake_case_ = int(_UpperCamelCase , base=1_6 ) snake_case_ = int(_UpperCamelCase , base=1_6 ) snake_case_ = primes[group]['''prime'''] if not DiffieHellman.is_valid_public_key_static(_UpperCamelCase , _UpperCamelCase ): raise ValueError('''Invalid public key''' ) snake_case_ = pow(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) return shaaaa(str(_UpperCamelCase ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
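# Usage sketch for the DiffieHellman class above. The class name is confirmed
# by the staticmethod call in the source, but the method names
# (`generate_public_key`, `generate_shared_key`) are taken from the upstream
# implementation and are an assumption here, since this file's method names
# are obfuscated.
alice = DiffieHellman(group=14)
bob = DiffieHellman(group=14)
shared_a = alice.generate_shared_key(bob.generate_public_key())
shared_b = bob.generate_shared_key(alice.generate_public_key())
assert shared_a == shared_b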
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OPTForCausalLM''', '''OPTModel''', '''OPTPreTrainedModel''', '''OPTForSequenceClassification''', '''OPTForQuestionAnswering''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''FlaxOPTForCausalLM''', '''FlaxOPTModel''', '''FlaxOPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
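# Usage sketch: with the lazy module above in place, OPT classes are imported
# from the package namespace and only materialized on first attribute access.
from transformers import OPTConfig, OPTForCausalLM  # resolved via _LazyModule

config = OPTConfig()
model = OPTForCausalLM(config)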
8
1
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger('''transformers.models.speecht5''') lowerCAmelCase_ = { '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''', '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''', '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''', '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''', } lowerCAmelCase_ = { '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''', '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''', } lowerCAmelCase_ = { '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''', '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''', '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''', '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''', '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''', } lowerCAmelCase_ = { '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''', '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''', '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''', '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''', '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''', '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''', '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''', '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''', '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''', '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''', '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''', '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''', } lowerCAmelCase_ = { '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''', } lowerCAmelCase_ = { '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''', } lowerCAmelCase_ = { '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''', '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''', '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''', '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''', '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''', '''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''', 
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''', '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''', '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''', } lowerCAmelCase_ = { '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''', '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''', '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''', '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''', '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''', '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''', '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''', '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''', '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''', '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''', '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''', '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''', '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''', } lowerCAmelCase_ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } lowerCAmelCase_ = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } lowerCAmelCase_ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } lowerCAmelCase_ = [] lowerCAmelCase_ = [ '''encoder.version''', '''encoder.layers.*.norm_k.weight''', '''encoder.layers.*.norm_k.bias''', '''decoder.version''', '''decoder.layers.*.norm_k.weight''', '''decoder.layers.*.norm_k.bias''', '''decoder.pos_emb.pe_k''', '''speech_encoder_prenet.embed_positions._float_tensor''', '''text_decoder_prenet.embed_positions._float_tensor''', ] lowerCAmelCase_ = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''speech_decoder_prenet.*''', '''speech_decoder_postnet.*''', ] lowerCAmelCase_ = IGNORE_KEYS + [ '''encoder.proj''', '''speech_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] lowerCAmelCase_ = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): for attribute in key.split('''.''' ): snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if weight_type is not None: snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape else: snake_case_ = hf_pointer.shape if hf_shape 
!= value.shape: raise ValueError( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "running_mean": snake_case_ = value elif weight_type == "running_var": snake_case_ = value elif weight_type == "num_batches_tracked": snake_case_ = value else: snake_case_ = value logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: snake_case_, snake_case_ = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = [] if task == "s2t": snake_case_ = hf_model.speechta.encoder.prenet.feature_encoder snake_case_ = MAPPING_S2T snake_case_ = IGNORE_KEYS_S2T elif task == "t2s": snake_case_ = None snake_case_ = MAPPING_T2S snake_case_ = IGNORE_KEYS_T2S elif task == "s2s": snake_case_ = hf_model.speechta.encoder.prenet.feature_encoder snake_case_ = MAPPING_S2S snake_case_ = IGNORE_KEYS_S2S else: raise ValueError(F'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): logger.info(F'''{name} was ignored''' ) continue snake_case_ = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == '''group''' , ) snake_case_ = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: snake_case_, snake_case_ = key.split('''.*.''' ) if prefix in name and suffix in name: snake_case_ = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(SCREAMING_SNAKE_CASE__ )[0].split('''.''' )[-2] snake_case_ = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE__ ) if "weight_g" in name: snake_case_ = '''weight_g''' elif "weight_v" in name: snake_case_ = '''weight_v''' elif "bias" in name: snake_case_ = '''bias''' elif "weight" in name: snake_case_ = '''weight''' elif "running_mean" in name: snake_case_ = '''running_mean''' elif "running_var" in name: snake_case_ = '''running_var''' elif "num_batches_tracked" in name: snake_case_ = '''num_batches_tracked''' else: snake_case_ = None set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = full_name.split('''conv_layers.''' )[-1] snake_case_ = name.split('''.''' ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) snake_case_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) snake_case_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) snake_case_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) snake_case_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , ): if config_path is not None: snake_case_ = SpeechTaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) else: snake_case_ = SpeechTaConfig() if task == "s2t": snake_case_ = config.max_text_positions snake_case_ = 
SpeechTaForSpeechToText(SCREAMING_SNAKE_CASE__ ) elif task == "t2s": snake_case_ = 1876 snake_case_ = 600 snake_case_ = config.max_speech_positions snake_case_ = SpeechTaForTextToSpeech(SCREAMING_SNAKE_CASE__ ) elif task == "s2s": snake_case_ = 1876 snake_case_ = config.max_speech_positions snake_case_ = SpeechTaForSpeechToSpeech(SCREAMING_SNAKE_CASE__ ) else: raise ValueError(F'''Unknown task name: {task}''' ) if vocab_path: snake_case_ = SpeechTaTokenizer(SCREAMING_SNAKE_CASE__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it snake_case_ = AddedToken('''<mask>''' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) snake_case_ = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) snake_case_ = SpeechTaFeatureExtractor() snake_case_ = SpeechTaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) snake_case_ = torch.load(SCREAMING_SNAKE_CASE__ ) recursively_load_weights(fairseq_checkpoint['''model'''] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if repo_id: print('''Pushing to the hub...''' ) processor.push_to_hub(SCREAMING_SNAKE_CASE__ ) model.push_to_hub(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( '''--task''', default='''s2t''', type=str, help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) lowerCAmelCase_ = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
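As a hedged illustration of driving the converter above without argparse, the entry point can be called directly with the same positional order the __main__ block uses; every path below is a placeholder, not a real file:

# Hypothetical direct call; all file paths are placeholders.
convert_speechta_checkpoint(
    "s2t",                        # task: speech-to-text
    "./speecht5_asr_fairseq.pt",  # fairseq checkpoint (placeholder)
    "./speecht5_asr_hf",          # PyTorch dump folder (placeholder)
    None,                         # optional hf config.json
    "./spm_char.model",           # optional SentencePiece model (placeholder)
)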
8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = "philschmid/bart-large-cnn-samsum" SCREAMING_SNAKE_CASE : Tuple = ( "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, " "and returns a summary of the text." ) SCREAMING_SNAKE_CASE : str = "summarizer" SCREAMING_SNAKE_CASE : str = AutoTokenizer SCREAMING_SNAKE_CASE : str = AutoModelForSeqaSeqLM SCREAMING_SNAKE_CASE : Optional[int] = ["text"] SCREAMING_SNAKE_CASE : Optional[int] = ["text"] def snake_case__( self : str , _UpperCamelCase : int ) ->Optional[int]: return self.pre_processor(_UpperCamelCase , return_tensors='''pt''' , truncation=_UpperCamelCase ) def snake_case__( self : Tuple , _UpperCamelCase : Optional[int] ) ->Tuple: return self.model.generate(**_UpperCamelCase )[0] def snake_case__( self : Optional[Any] , _UpperCamelCase : Optional[int] ) ->Any: return self.pre_processor.decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
8
1
import json import os import torch from diffusers import UNetaDModel os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True) os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True) os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): if hor == 128: snake_case_ = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''') snake_case_ = (32, 128, 256) snake_case_ = ('''UpResnetBlock1D''', '''UpResnetBlock1D''') elif hor == 32: snake_case_ = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''') snake_case_ = (32, 64, 128, 256) snake_case_ = ('''UpResnetBlock1D''', '''UpResnetBlock1D''', '''UpResnetBlock1D''') snake_case_ = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' ) snake_case_ = model.state_dict() snake_case_ = { '''down_block_types''': down_block_types, '''block_out_channels''': block_out_channels, '''up_block_types''': up_block_types, '''layers_per_block''': 1, '''use_timestep_embedding''': True, '''out_block_type''': '''OutConv1DBlock''', '''norm_num_groups''': 8, '''downsample_each_block''': False, '''in_channels''': 14, '''out_channels''': 14, '''extra_in_channels''': 0, '''time_embedding_type''': '''positional''', '''flip_sin_to_cos''': False, '''freq_shift''': 1, '''sample_size''': 65536, '''mid_block_type''': '''MidResTemporalBlock1D''', '''act_fn''': '''mish''', } snake_case_ = UNetaDModel(**SCREAMING_SNAKE_CASE__ ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) snake_case_ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): snake_case_ = state_dict.pop(SCREAMING_SNAKE_CASE__ ) hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE__ ) torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' ) with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __SCREAMING_SNAKE_CASE (): snake_case_ = { '''in_channels''': 14, '''down_block_types''': ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D'''), '''up_block_types''': (), '''out_block_type''': '''ValueFunction''', '''mid_block_type''': '''ValueFunctionMidBlock1D''', '''block_out_channels''': (32, 64, 128, 256), '''layers_per_block''': 1, '''downsample_each_block''': True, '''sample_size''': 65536, '''out_channels''': 14, '''extra_in_channels''': 0, '''time_embedding_type''': '''positional''', '''use_timestep_embedding''': True, '''flip_sin_to_cos''': False, '''freq_shift''': 1, '''norm_num_groups''': 8, '''act_fn''': '''mish''', } snake_case_ = torch.load('''/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch''' ) snake_case_ = model snake_case_ = UNetaDModel(**SCREAMING_SNAKE_CASE__ ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) snake_case_ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): snake_case_ = state_dict.pop(SCREAMING_SNAKE_CASE__ ) hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE__ ) torch.save(hf_value_function.state_dict() , 
'''hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin''' ) with open('''hub/hopper-medium-v2/value_function/config.json''' , '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": unet(32) # unet(128) value_function()
8
from collections import deque from .hash_table import HashTable class snake_case_ ( __A ): '''simple docstring''' def __init__( self : int , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) ->Tuple: super().__init__(*_UpperCamelCase , **_UpperCamelCase ) def snake_case__( self : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Dict ) ->Tuple: snake_case_ = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(_UpperCamelCase ) snake_case_ = self.values[key] def snake_case__( self : List[Any] ) ->str: return ( sum(self.charge_factor - len(_UpperCamelCase ) for slot in self.values ) / self.size_table * self.charge_factor ) def snake_case__( self : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int]=None ) ->str: if not ( len(self.values[key] ) == self.charge_factor and self.values.count(_UpperCamelCase ) == 0 ): return key return super()._collision_resolution(_UpperCamelCase , _UpperCamelCase )
8
1
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            # Evict the least recently used key once the cache is full.
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
8
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
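For concreteness, a small illustrative input for the solver above (0 marks an open cell, 1 a blocked cell); the path runs from the top-left to the bottom-right corner:

# Illustrative 5x5 maze; this particular grid is solvable.
maze = [
    [0, 1, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 0, 1, 0, 0],
    [1, 0, 0, 1, 0],
]
solve_maze(maze)  # prints the 0/1 solution matrix and returns True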
8
1
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
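A quick sanity check against an integral with a known closed form: the integral of x**2 from 0 to 3 is exactly 9, and the composite trapezoid error shrinks quadratically with the step count.

# The rule's error here is about (3 * (3 / 1000) ** 2 / 12) * 2 = 4.5e-6.
approx = trapezoidal_area(lambda x: x**2, 0, 3, 1000)
print(approx)  # ~9.0000045, converging to 9 as steps grow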
8
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to the given precision with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
8
1
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError('''Quantized models are not supported.''' ) snake_case_ = re.match(R'''^mobilenet_v1_([^_]*)_([^_]*)$''' , SCREAMING_SNAKE_CASE__ ) if matches: snake_case_ = float(matches[1] ) snake_case_ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". snake_case_ = 1001 snake_case_ = '''imagenet-1k-id2label.json''' snake_case_ = '''huggingface/label-files''' snake_case_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) ) snake_case_ = {int(SCREAMING_SNAKE_CASE__ ) + 1: v for k, v in idalabel.items()} snake_case_ = '''background''' snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} return config def __SCREAMING_SNAKE_CASE (): snake_case_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case_ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) return im @torch.no_grad() def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ): snake_case_ = get_mobilenet_va_config(SCREAMING_SNAKE_CASE__ ) # Load 🤗 model snake_case_ = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE__ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor snake_case_ = MobileNetVaImageProcessor( crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , ) snake_case_ = image_processor(images=prepare_img() , return_tensors='''pt''' ) snake_case_ = model(**SCREAMING_SNAKE_CASE__ ) snake_case_ = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": snake_case_ = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": snake_case_ = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: snake_case_ = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: print('''Pushing to the hub...''' ) snake_case_ = '''google/''' + model_name image_processor.push_to_hub(SCREAMING_SNAKE_CASE__ ) model.push_to_hub(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''mobilenet_v1_1.0_224''', type=str, help='''Name of the MobileNetV1 model you\'d like to convert. 
Should in the form \'mobilenet_v1_<depth>_<size>\'.''', ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCAmelCase_ = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
8
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class snake_case_ ( __A ): '''simple docstring''' def __init__( self : int , _UpperCamelCase : pyspark.sql.DataFrame , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : bool = True , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : str = None , _UpperCamelCase : bool = True , _UpperCamelCase : str = "arrow" , **_UpperCamelCase : Tuple , ) ->str: super().__init__( split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , **_UpperCamelCase , ) snake_case_ = load_from_cache_file snake_case_ = file_format snake_case_ = Spark( df=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , working_dir=_UpperCamelCase , **_UpperCamelCase , ) def snake_case__( self : int ) ->Tuple: if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) snake_case_ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=_UpperCamelCase , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
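A hedged usage sketch of the reader above (exposed in the datasets API as SparkDatasetReader, with read() declared on AbstractDatasetReader); it assumes pyspark is installed and a local SparkSession can be created:

# Hedged sketch; class and parameter names follow the datasets API only
# as far as the signature above shows them.
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], ["text"])

# streaming=False materializes the DataFrame into an on-disk Dataset.
dataset = SparkDatasetReader(df, streaming=False, cache_dir="./spark_cache").read()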
8
1
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable lowerCAmelCase_ = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''DPTFeatureExtractor'''] lowerCAmelCase_ = ['''DPTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DPTForDepthEstimation''', '''DPTForSemanticSegmentation''', '''DPTModel''', '''DPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
8
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable lowerCAmelCase_ = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''DPTFeatureExtractor'''] lowerCAmelCase_ = ['''DPTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DPTForDepthEstimation''', '''DPTForSemanticSegmentation''', '''DPTModel''', '''DPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
8
1
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
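A small end-to-end example; the benchmark path and the metric values below are invented for illustration:

# Hypothetical input shaped like the script expects:
# {benchmark_name: {metric_name: {"new": ..., "old": ..., "diff": ...}}}
sample = {
    "benchmark/benchmark_map.json": {
        "load_time_s": {"new": 1.25, "old": 1.50, "diff": -0.25},
    }
}
with open("bench.json", "w", encoding="utf-8") as f:
    json.dump(sample, f)

format_json_to_md("bench.json", "bench.md")
# bench.md now holds a collapsible <details> block with one markdown table
# per benchmark, e.g. a "| metric | load_time_s |" header row.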
8
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase_ = { '''vocab_file''': { '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''', }, '''tokenizer_file''': { '''unc-nlp/lxmert-base-uncased''': ( '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase_ = { '''unc-nlp/lxmert-base-uncased''': 5_12, } lowerCAmelCase_ = { '''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True}, } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : Any = LxmertTokenizer def __init__( self : Union[str, Any] , _UpperCamelCase : int=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Dict=True , _UpperCamelCase : Any="[UNK]" , _UpperCamelCase : Tuple="[SEP]" , _UpperCamelCase : List[Any]="[PAD]" , _UpperCamelCase : Union[str, Any]="[CLS]" , _UpperCamelCase : str="[MASK]" , _UpperCamelCase : List[str]=True , _UpperCamelCase : List[str]=None , **_UpperCamelCase : List[str] , ) ->Any: super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , ) snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _UpperCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _UpperCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _UpperCamelCase ) != tokenize_chinese_chars ): snake_case_ = getattr(_UpperCamelCase , normalizer_state.pop('''type''' ) ) snake_case_ = do_lower_case snake_case_ = strip_accents snake_case_ = tokenize_chinese_chars snake_case_ = normalizer_class(**_UpperCamelCase ) snake_case_ = do_lower_case def snake_case__( self : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=None ) ->List[Any]: snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case__( self : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: snake_case_ = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase ) return tuple(_UpperCamelCase )
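A short hedged usage sketch of the fast tokenizer defined above (LxmertTokenizerFast in the transformers API); it downloads the unc-nlp/lxmert-base-uncased vocabulary on first use:

# Hedged sketch; fetches the vocab listed in the pretrained maps above.
tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")

encoded = tokenizer("A cat sits on the mat.")
print(encoded["input_ids"])       # ids wrapped as [CLS] ... [SEP]
print(encoded["token_type_ids"])  # all zeros for a single segment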
8
1
from datetime import datetime import matplotlib.pyplot as plt import torch def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): for param in module.parameters(): snake_case_ = False def __SCREAMING_SNAKE_CASE (): snake_case_ = '''cuda''' if torch.cuda.is_available() else '''cpu''' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): snake_case_ = '''mps''' if device == "mps": print( '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch''' ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues''' ''' with generations.''' ) return device def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = plt.imshow(SCREAMING_SNAKE_CASE__ ) fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE__ ) fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE__ ) plt.show() def __SCREAMING_SNAKE_CASE (): snake_case_ = datetime.now() snake_case_ = current_time.strftime('''%H:%M:%S''' ) return timestamp
8
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
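A brief check of the two helpers on their own; is_prime relies on the 6k ± 1 wheel, so after 2 and 3 only numbers of the form 6k - 1 and 6k + 1 are trial-divided.

assert is_prime(97) and not is_prime(1)
print(solution(6))  # 13, the sixth prime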
8
1
import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def __SCREAMING_SNAKE_CASE (): with parallel_backend('''spark''' ): assert ParallelBackendConfig.backend_name == "spark" snake_case_ = [1, 2, 3] with pytest.raises(SCREAMING_SNAKE_CASE__ ): with parallel_backend('''unsupported backend''' ): map_nested(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=2 ) with pytest.raises(SCREAMING_SNAKE_CASE__ ): with parallel_backend('''unsupported backend''' ): map_nested(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize('''num_proc''' , [2, -1] ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = [1, 2] snake_case_ = {'''a''': 1, '''b''': 2} snake_case_ = {'''a''': [1, 2], '''b''': [3, 4]} snake_case_ = {'''a''': {'''1''': 1}, '''b''': 2} snake_case_ = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4} snake_case_ = [2, 3] snake_case_ = {'''a''': 2, '''b''': 3} snake_case_ = {'''a''': [2, 3], '''b''': [4, 5]} snake_case_ = {'''a''': {'''1''': 2}, '''b''': 3} snake_case_ = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5} with parallel_backend('''spark''' ): assert map_nested(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=SCREAMING_SNAKE_CASE__ ) == expected_map_nested_sa assert map_nested(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=SCREAMING_SNAKE_CASE__ ) == expected_map_nested_sa assert map_nested(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=SCREAMING_SNAKE_CASE__ ) == expected_map_nested_sa assert map_nested(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=SCREAMING_SNAKE_CASE__ ) == expected_map_nested_sa assert map_nested(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=SCREAMING_SNAKE_CASE__ ) == expected_map_nested_sa
8
from sklearn.metrics import mean_squared_error import datasets lowerCAmelCase_ = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' lowerCAmelCase_ = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' lowerCAmelCase_ = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. 
])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case_ ( datasets.Metric ): '''simple docstring''' def snake_case__( self : Optional[int] ) ->List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def snake_case__( self : List[Any] ) ->Optional[int]: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def snake_case__( self : int , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any=None , _UpperCamelCase : Optional[int]="uniform_average" , _UpperCamelCase : Tuple=True ) ->Tuple: snake_case_ = mean_squared_error( _UpperCamelCase , _UpperCamelCase , sample_weight=_UpperCamelCase , multioutput=_UpperCamelCase , squared=_UpperCamelCase ) return {"mse": mse}
8
1
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9
    )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6
    )

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
8
def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
8
1
from math import pi


def arc_length(angle: float, radius: float) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
8
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
8
1
def binary_count_setbits(a: int) -> int:
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(a).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
8
from ..utils import DummyObject, requires_backends class snake_case_ ( metaclass=__A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = ["note_seq"] def __init__( self : Optional[int] , *_UpperCamelCase : str , **_UpperCamelCase : Optional[int] ) ->Any: requires_backends(self , ['''note_seq'''] ) @classmethod def snake_case__( cls : int , *_UpperCamelCase : Any , **_UpperCamelCase : List[Any] ) ->int: requires_backends(cls , ['''note_seq'''] ) @classmethod def snake_case__( cls : Dict , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Union[str, Any] ) ->List[str]: requires_backends(cls , ['''note_seq'''] )
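The dummy-object pattern above lets the package import cleanly when the optional note_seq dependency is absent; the failure is deferred until the class is actually used. A hedged sketch of that behavior, assuming the class defined this way is named MidiProcessor:

# Hedged sketch of the deferred failure.
try:
    processor = MidiProcessor()  # any use of the dummy triggers the check
except ImportError as err:
    print(err)  # the message tells the user to install note_seq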
8
1
import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint lowerCAmelCase_ = { '''169M''': 12, '''430M''': 24, '''1B5''': 24, '''3B''': 32, '''7B''': 32, '''14B''': 40, } lowerCAmelCase_ = { '''169M''': 7_68, '''430M''': 10_24, '''1B5''': 20_48, '''3B''': 25_60, '''7B''': 40_96, '''14B''': 51_20, } def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = list(state_dict.keys() ) for name in state_dict_keys: snake_case_ = state_dict.pop(SCREAMING_SNAKE_CASE__ ) # emb -> embedding if name.startswith('''emb.''' ): snake_case_ = name.replace('''emb.''' , '''embeddings.''' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('''blocks.0.ln0''' ): snake_case_ = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' ) # att -> attention snake_case_ = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , SCREAMING_SNAKE_CASE__ ) # ffn -> feed_forward snake_case_ = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , SCREAMING_SNAKE_CASE__ ) # time_mix_k -> time_mix_key and reshape if name.endswith('''.time_mix_k''' ): snake_case_ = name.replace('''.time_mix_k''' , '''.time_mix_key''' ) # time_mix_v -> time_mix_value and reshape if name.endswith('''.time_mix_v''' ): snake_case_ = name.replace('''.time_mix_v''' , '''.time_mix_value''' ) # time_mix_r -> time_mix_key and reshape if name.endswith('''.time_mix_r''' ): snake_case_ = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' ) if name != "head.weight": snake_case_ = '''rwkv.''' + name snake_case_ = weight return state_dict def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=None ): # 1. If possible, build the tokenizer. if tokenizer_file is None: print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' ) snake_case_ = 50277 snake_case_ = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' ) else: snake_case_ = PreTrainedTokenizerFast(tokenizer_file=SCREAMING_SNAKE_CASE__ ) snake_case_ = len(SCREAMING_SNAKE_CASE__ ) tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ ) # 2. Build the config snake_case_ = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: snake_case_ = candidate break if size is None: raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' ) if size not in possible_sizes: raise ValueError(F'''`size` should be one of {possible_sizes}, got {size}.''' ) snake_case_ = RwkvConfig( vocab_size=SCREAMING_SNAKE_CASE__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(SCREAMING_SNAKE_CASE__ ) # 3. Download model file then convert state_dict snake_case_ = hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ = torch.load(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' ) snake_case_ = convert_state_dict(SCREAMING_SNAKE_CASE__ ) # 4. 
Split in shards and save snake_case_, snake_case_ = shard_checkpoint(SCREAMING_SNAKE_CASE__ ) for shard_file, shard in shards.items(): torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) if index is not None: snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Save the index as well with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f: snake_case_ = json.dumps(SCREAMING_SNAKE_CASE__ , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ ) + '''\n''' f.write(SCREAMING_SNAKE_CASE__ ) # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict) print( '''Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry, you have still converted the model.''' ) snake_case_ = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: snake_case_ = torch.load(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' ) snake_case_ = AutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ ) model.push_to_hub(SCREAMING_SNAKE_CASE__ , max_shard_size='''2GB''' ) tokenizer.push_to_hub(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.''' ) parser.add_argument( '''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.''' ) parser.add_argument( '''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.''' ) parser.add_argument( '''--tokenizer_file''', default=None, type=str, help='''Path to the tokenizer file to use (if not provided, only the model is converted).''', ) parser.add_argument( '''--size''', default=None, type=str, help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Push the converted model to the Hub.''', ) parser.add_argument( '''--model_name''', default=None, type=str, help='''Name of the pushed model on the Hub, including the username / organization.''', ) lowerCAmelCase_ = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
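# A minimal, self-contained sketch of the key-renaming scheme used by the
# conversion script above, shown on a couple of hypothetical checkpoint keys
# (the real mapping covers more patterns, e.g. the time_mix_* suffixes):
import re

def rename_rwkv_key(name: str) -> str:
    if name.startswith("emb."):
        name = name.replace("emb.", "embeddings.")  # emb -> embeddings
    name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
    name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
    return name

assert rename_rwkv_key("blocks.3.att.time_mix_k") == "blocks.3.attention.time_mix_k"
assert rename_rwkv_key("emb.weight") == "embeddings.weight"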
8
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = "vit_msn" def __init__( self : Dict , _UpperCamelCase : Optional[int]=7_6_8 , _UpperCamelCase : Optional[Any]=1_2 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : str=3_0_7_2 , _UpperCamelCase : Tuple="gelu" , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : List[str]=0.02 , _UpperCamelCase : List[Any]=1e-06 , _UpperCamelCase : Any=2_2_4 , _UpperCamelCase : Optional[Any]=1_6 , _UpperCamelCase : Any=3 , _UpperCamelCase : str=True , **_UpperCamelCase : Any , ) ->int: super().__init__(**_UpperCamelCase ) snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = qkv_bias
8
1
import functools def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = len(SCREAMING_SNAKE_CASE__ ) snake_case_ = len(SCREAMING_SNAKE_CASE__ ) @functools.cache def min_distance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int: # if the first word index has run past the end - delete the rest of the second word if indexa >= len_worda: return len_worda - indexa # if the second word index has run past the end - delete the rest of the first word if indexa >= len_worda: return len_worda - indexa snake_case_ = int(worda[indexa] != worda[indexa] ) # 1 if the current letters differ, else 0 return min( 1 + min_distance(indexa + 1 , SCREAMING_SNAKE_CASE__ ) , 1 + min_distance(SCREAMING_SNAKE_CASE__ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
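# Quick sanity check of the memoised edit distance above: "kitten" -> "sitting"
# takes 3 single-character edits (k->s, e->i, insert g). The names here are
# illustrative; the logic mirrors the recursion in the file above.
import functools

def edit_distance(a: str, b: str) -> int:
    @functools.cache
    def go(i: int, j: int) -> int:
        if i == len(a):
            return len(b) - j              # insert the rest of b
        if j == len(b):
            return len(a) - i              # delete the rest of a
        diff = int(a[i] != b[j])           # 0 when the current letters match
        return min(1 + go(i + 1, j), 1 + go(i, j + 1), diff + go(i + 1, j + 1))
    return go(0, 0)

assert edit_distance("kitten", "sitting") == 3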
8
from __future__ import annotations from math import pi, sqrt def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if inductance <= 0: raise ValueError('''Inductance cannot be 0 or negative''' ) elif capacitance <= 0: raise ValueError('''Capacitance cannot be 0 or negative''' ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
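# Worked example for the formula above, f = 1 / (2 * pi * sqrt(L * C)):
# an LC tank with L = 10 mH and C = 100 nF resonates near 5 kHz.
from math import pi, sqrt

inductance = 10e-3     # henries
capacitance = 100e-9   # farads
print(1 / (2 * pi * sqrt(inductance * capacitance)))  # ~5032.9 Hz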
8
1
import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 8 ): snake_case_ = ascii_letters + digits + punctuation return "".join(secrets.choice(SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ ) ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # Alternative password generator: combines the required characters with # random letters, digits, and punctuation from the helper functions below # Put your code here... i -= len(SCREAMING_SNAKE_CASE__ ) snake_case_ = i // 3 snake_case_ = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) snake_case_ = ( chars_incl + random(SCREAMING_SNAKE_CASE__ , quotient + remainder ) + random(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) + random(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) snake_case_ = list(SCREAMING_SNAKE_CASE__ ) shuffle(SCREAMING_SNAKE_CASE__ ) return "".join(SCREAMING_SNAKE_CASE__ ) # random is a generalised function for letters, characters and numbers def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return "".join(secrets.choice(SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ ) ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): pass # Put your code here... def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): pass # Put your code here... def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): pass # Put your code here... def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 8 ): if len(SCREAMING_SNAKE_CASE__ ) < min_length: # The password must be at least 8 characters long return False snake_case_ = any(char in ascii_uppercase for char in password ) snake_case_ = any(char in ascii_lowercase for char in password ) snake_case_ = any(char in digits for char in password ) snake_case_ = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain uppercase and lowercase letters, # digits, and special characters def __SCREAMING_SNAKE_CASE (): snake_case_ = int(input('''Please indicate the max length of your password: ''' ).strip() ) snake_case_ = input( '''Please indicate the characters that must be in your password: ''' ).strip() print('''Password generated:''' , password_generator(SCREAMING_SNAKE_CASE__ ) ) print( '''Alternative Password generated:''' , alternative_password_generator(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , ) print('''[If you are thinking of using this password, you had better save it.]''' ) if __name__ == "__main__": main()
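# Note on the design choice above: `secrets.choice` draws from the OS-level
# CSPRNG, unlike `random.choice`, which is what you want for credentials.
# A compact equivalent for a single 16-character password:
import secrets
import string

alphabet = string.ascii_letters + string.digits + string.punctuation
print("".join(secrets.choice(alphabet) for _ in range(16)))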
8
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): return x + 2 class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''x = 3''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3} ) snake_case_ = '''x = y''' snake_case_ = {'''y''': 5} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 5, '''y''': 5} ) def snake_case__( self : Dict ) ->Optional[int]: snake_case_ = '''y = add_two(x)''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) # Won't work without the tool with CaptureStdout() as out: snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result is None assert "tried to execute add_two" in out.out def snake_case__( self : Union[str, Any] ) ->Dict: snake_case_ = '''x = 3''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3} ) def snake_case__( self : Optional[int] ) ->Optional[int]: snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def snake_case__( self : Dict ) ->str: snake_case_ = '''x = 3\ny = 5''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) def snake_case__( self : str ) ->Tuple: snake_case_ = '''text = f\'This is x: {x}.\'''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''text''': '''This is x: 3.'''} ) def snake_case__( self : Optional[Any] ) ->List[str]: snake_case_ = '''if x <= 3:\n y = 2\nelse:\n y = 5''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 2} ) snake_case_ = {'''x''': 8} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 8, '''y''': 5} ) def snake_case__( self : str ) ->str: snake_case_ = '''test_list = [x, add_two(x)]''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , [3, 5] ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} ) def snake_case__( self : Any ) ->List[Any]: snake_case_ = '''y = x''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 3} ) def snake_case__( self : Optional[int] ) ->Dict: snake_case_ = '''test_list = [x, add_two(x)]\ntest_list[1]''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} ) snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''x = 0\nfor i in range(3):\n x = i''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {'''range''': range} , state=_UpperCamelCase ) assert result == 2 self.assertDictEqual(_UpperCamelCase , {'''x''': 2, '''i''': 2} )
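# A deliberately tiny sketch of how an `evaluate(code, tools, state)` helper
# like the one exercised above *could* be built on the stdlib `ast` module.
# This toy handles only assignments and bare expressions and is not the actual
# transformers interpreter:
import ast

def tiny_evaluate(code, tools, state):
    result = None
    env = {**tools, **state}
    for node in ast.parse(code).body:
        if isinstance(node, ast.Assign):
            result = eval(compile(ast.Expression(node.value), "<ast>", "eval"), env)
            for target in node.targets:        # assumes simple `name = ...` targets
                state[target.id] = result
                env[target.id] = result
        elif isinstance(node, ast.Expr):
            result = eval(compile(ast.Expression(node.value), "<ast>", "eval"), env)
    return result

state = {}
assert tiny_evaluate("x = 3\ny = x + 2", {}, state) == 5
assert state == {"x": 3, "y": 5}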
8
1
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if index == r: for j in range(SCREAMING_SNAKE_CASE__ ): print(data[j] , end=''' ''' ) print(''' ''' ) return # When no more elements are there to put in data[] if i >= n: return # current is included, put next at next location snake_case_ = arr[i] combination_util(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 , SCREAMING_SNAKE_CASE__ , i + 1 ) # current is excluded, replace it with # next (Note that i+1 is passed, but # index is not changed) combination_util(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , i + 1 ) # The main function that prints all combinations # of size r in arr[] of size n. This function # mainly uses combinationUtil() def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # A temporary array to store all combination one by one snake_case_ = [0] * r # Print all combination using temporary array 'data[]' combination_util(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ , 0 ) if __name__ == "__main__": # Driver code to check the function above lowerCAmelCase_ = [10, 20, 30, 40, 50] print_combination(arr, len(arr), 3) # This code is contributed by Ambuj sahu
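# The recursive helper above enumerates the same r-element subsets that
# itertools.combinations yields, just printed instead of returned:
from itertools import combinations

for combo in combinations([10, 20, 30, 40, 50], 3):
    print(*combo)   # 10 20 30, 10 20 40, ... (10 combinations in total)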
8
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Tuple ) ->List[Any]: return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_UpperCamelCase ) for s in shape] )}.npy''' def snake_case__( self : Any ) ->List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() def snake_case__( self : int , _UpperCamelCase : Union[str, Any]=0 , _UpperCamelCase : int=(4, 4, 6_4, 6_4) , _UpperCamelCase : Optional[int]=False ) ->Tuple: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase , _UpperCamelCase ) ) , dtype=_UpperCamelCase ) return image def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Optional[int]="CompVis/stable-diffusion-v1-4" ) ->Optional[Any]: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = '''bf16''' if fpaa else None snake_case_, snake_case_ = FlaxUNetaDConditionModel.from_pretrained( _UpperCamelCase , subfolder='''unet''' , dtype=_UpperCamelCase , revision=_UpperCamelCase ) return model, params def snake_case__( self : Dict , _UpperCamelCase : List[Any]=0 , _UpperCamelCase : Tuple=(4, 7_7, 7_6_8) , _UpperCamelCase : List[Any]=False ) ->int: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase , _UpperCamelCase ) ) , dtype=_UpperCamelCase ) return hidden_states @parameterized.expand( [ # fmt: off [8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) ->Union[str, Any]: snake_case_, snake_case_ = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=_UpperCamelCase ) snake_case_ = self.get_latents(_UpperCamelCase , fpaa=_UpperCamelCase ) snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , fpaa=_UpperCamelCase ) snake_case_ = model.apply( {'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample assert sample.shape == latents.shape snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def 
snake_case__( self : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str ) ->Dict: snake_case_, snake_case_ = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=_UpperCamelCase ) snake_case_ = self.get_latents(_UpperCamelCase , shape=(4, 4, 9_6, 9_6) , fpaa=_UpperCamelCase ) snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , shape=(4, 7_7, 1_0_2_4) , fpaa=_UpperCamelCase ) snake_case_ = model.apply( {'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample assert sample.shape == latents.shape snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 )
8
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCAmelCase_ = { '''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''], '''tokenization_ctrl''': ['''CTRLTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CTRLForSequenceClassification''', '''CTRLLMHeadModel''', '''CTRLModel''', '''CTRLPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCTRLForSequenceClassification''', '''TFCTRLLMHeadModel''', '''TFCTRLModel''', '''TFCTRLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
8
import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def __SCREAMING_SNAKE_CASE (*SCREAMING_SNAKE_CASE__ ): if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = list(SCREAMING_SNAKE_CASE__ ) for i in range(len(SCREAMING_SNAKE_CASE__ ) ): snake_case_ = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = [ '''CUDA out of memory.''', # CUDA OOM '''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU '''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM ] if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 128 ): if function is None: return functools.partial(SCREAMING_SNAKE_CASE__ , starting_batch_size=SCREAMING_SNAKE_CASE__ ) snake_case_ = starting_batch_size def decorator(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() snake_case_ = list(inspect.signature(SCREAMING_SNAKE_CASE__ ).parameters.keys() ) # Guard against user error if len(SCREAMING_SNAKE_CASE__ ) < (len(SCREAMING_SNAKE_CASE__ ) + 1): snake_case_ = ''', '''.join([F'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] ) raise TypeError( F'''Batch size was passed into `{function.__name__}` as the first argument when called.''' F'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' ) while True: if batch_size == 0: raise RuntimeError('''No executable batch size found, reached zero.''' ) try: return function(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) except Exception as e: if should_reduce_batch_size(SCREAMING_SNAKE_CASE__ ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
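# Hypothetical usage of the batch-size-halving decorator defined above (the
# same pattern ships in accelerate as `find_executable_batch_size`): the
# wrapped function takes the batch size as its first argument, and the
# decorator retries with half the batch size after each recognised OOM error.
def train(batch_size):
    if batch_size > 32:                         # stand-in for a CUDA OOM failure
        raise RuntimeError("CUDA out of memory.")
    return batch_size

# decorated = find_executable_batch_size(train, starting_batch_size=128)
# decorated()  # tries 128, 64, then succeeds with 32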
8
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase_ = { '''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''BloomTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BloomForCausalLM''', '''BloomModel''', '''BloomPreTrainedModel''', '''BloomForSequenceClassification''', '''BloomForTokenClassification''', '''BloomForQuestionAnswering''', ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
8
from __future__ import annotations def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): return [ord(SCREAMING_SNAKE_CASE__ ) - 96 for elem in plain] def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): return "".join(chr(elem + 96 ) for elem in encoded ) def __SCREAMING_SNAKE_CASE (): snake_case_ = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''' , SCREAMING_SNAKE_CASE__ ) print('''Decoded:''' , decode(SCREAMING_SNAKE_CASE__ ) ) if __name__ == "__main__": main()
8
1
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType lowerCAmelCase_ = get_logger(__name__) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ): os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) with FSDP.state_dict_type( SCREAMING_SNAKE_CASE__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case_ = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if accelerator.process_index == 0: logger.info(F'''Saving model to {output_model_file}''' ) torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) logger.info(F'''Saving model to {output_model_file}''' ) torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''{MODEL_NAME}_{model_index}''' ) os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) logger.info(F'''Saving model to {ckpt_dir}''' ) snake_case_ = {'''model''': state_dict} dist_cp.save_state_dict( state_dict=SCREAMING_SNAKE_CASE__ , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE__ ) , planner=DefaultSavePlanner() , ) logger.info(F'''Model saved to {ckpt_dir}''' ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( SCREAMING_SNAKE_CASE__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(SCREAMING_SNAKE_CASE__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( '''Set the `sync_module_states` flag to `True` so that model states are synced across processes when ''' '''initializing FSDP object''' ) return snake_case_ = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) logger.info(F'''Loading model from {input_model_file}''' ) snake_case_ = torch.load(SCREAMING_SNAKE_CASE__ ) logger.info(F'''Model loaded from 
{input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) logger.info(F'''Loading model from {input_model_file}''' ) snake_case_ = torch.load(SCREAMING_SNAKE_CASE__ ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ = ( os.path.join(SCREAMING_SNAKE_CASE__ , F'''{MODEL_NAME}_{model_index}''' ) if F'''{MODEL_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading model from {ckpt_dir}''' ) snake_case_ = {'''model''': model.state_dict()} dist_cp.load_state_dict( state_dict=SCREAMING_SNAKE_CASE__ , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE__ ) , planner=DefaultLoadPlanner() , ) snake_case_ = state_dict['''model'''] logger.info(F'''Model loaded from {ckpt_dir}''' ) model.load_state_dict(SCREAMING_SNAKE_CASE__ ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ): os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) with FSDP.state_dict_type( SCREAMING_SNAKE_CASE__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case_ = FSDP.optim_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: snake_case_ = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' ) torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) logger.info(F'''Optimizer state saved in {output_optimizer_file}''' ) else: snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) logger.info(F'''Saving Optimizer state to {ckpt_dir}''' ) dist_cp.save_state_dict( state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE__ ) , planner=DefaultSavePlanner() , ) logger.info(F'''Optimizer state saved in {ckpt_dir}''' ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( SCREAMING_SNAKE_CASE__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ = None # below check should work but currently it isn't working (mostly a PyTorch issue); # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: snake_case_ = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' ) snake_case_ =
torch.load(SCREAMING_SNAKE_CASE__ ) logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' ) else: snake_case_ = ( os.path.join(SCREAMING_SNAKE_CASE__ , F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) if F'''{OPTIMIZER_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading Optimizer from {ckpt_dir}''' ) snake_case_ = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE__ ) , ) snake_case_ = optim_state['''optimizer'''] logger.info(F'''Optimizer loaded from {ckpt_dir}''' ) snake_case_ = FSDP.optim_state_dict_to_load(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) optimizer.load_state_dict(SCREAMING_SNAKE_CASE__ )
8
import math def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(SCREAMING_SNAKE_CASE__ ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError('''This should never happen''' ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. lowerCAmelCase_ = '''Enter the base and the power separated by a comma: ''' lowerCAmelCase_ , lowerCAmelCase_ = map(int, input(prompt).split(''',''')) lowerCAmelCase_ , lowerCAmelCase_ = map(int, input(prompt).split(''',''')) # We find the log of each number, using the function res(), which takes two # arguments. lowerCAmelCase_ = res(xa, ya) lowerCAmelCase_ = res(xa, ya) # We check for the largest number if resa > resa: print('''Largest number is''', xa, '''^''', ya) elif resa > resa: print('''Largest number is''', xa, '''^''', ya) else: print('''Both are equal''')
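# The trick above in one line: log10 is monotonic, so comparing x**y with a**b
# reduces to comparing y*log10(x) with b*log10(a), avoiding huge intermediate powers.
import math

x, y, a, b = 2, 100, 10, 30
larger = f"{x}^{y}" if y * math.log10(x) > b * math.log10(a) else f"{a}^{b}"
print(larger)  # 2^100 (~1.27e30) beats 10^30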
8
1
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): return x + 2 class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''x = 3''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3} ) snake_case_ = '''x = y''' snake_case_ = {'''y''': 5} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 5, '''y''': 5} ) def snake_case__( self : Dict ) ->Optional[int]: snake_case_ = '''y = add_two(x)''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) # Won't work without the tool with CaptureStdout() as out: snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result is None assert "tried to execute add_two" in out.out def snake_case__( self : Union[str, Any] ) ->Dict: snake_case_ = '''x = 3''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3} ) def snake_case__( self : Optional[int] ) ->Optional[int]: snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def snake_case__( self : Dict ) ->str: snake_case_ = '''x = 3\ny = 5''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) def snake_case__( self : str ) ->Tuple: snake_case_ = '''text = f\'This is x: {x}.\'''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''text''': '''This is x: 3.'''} ) def snake_case__( self : Optional[Any] ) ->List[str]: snake_case_ = '''if x <= 3:\n y = 2\nelse:\n y = 5''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 2} ) snake_case_ = {'''x''': 8} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 8, '''y''': 5} ) def snake_case__( self : str ) ->str: snake_case_ = '''test_list = [x, add_two(x)]''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , [3, 5] ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} ) def snake_case__( self : Any ) ->List[Any]: snake_case_ = '''y = x''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 3} ) def snake_case__( self : Optional[int] ) ->Dict: snake_case_ = '''test_list = [x, add_two(x)]\ntest_list[1]''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} ) snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''x = 0\nfor i in range(3):\n x = i''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {'''range''': range} , state=_UpperCamelCase ) assert result == 2 self.assertDictEqual(_UpperCamelCase , {'''x''': 2, '''i''': 2} )
8
import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase_ = { '''vocab_file''': { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''', '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model''' ), } } lowerCAmelCase_ = { '''google/bigbird-roberta-base''': 40_96, '''google/bigbird-roberta-large''': 40_96, '''google/bigbird-base-trivia-itc''': 40_96, } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : List[Any] = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE : List[int] = [] def __init__( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict="<unk>" , _UpperCamelCase : List[str]="<s>" , _UpperCamelCase : Tuple="</s>" , _UpperCamelCase : Any="<pad>" , _UpperCamelCase : Any="[SEP]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) ->None: snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else bos_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else eos_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else unk_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else pad_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else cls_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else sep_token # Mask token behaves like a normal word, i.e.
include the space before it snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , sep_token=_UpperCamelCase , mask_token=_UpperCamelCase , cls_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) snake_case_ = vocab_file snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCamelCase ) @property def snake_case__( self : str ) ->List[Any]: return self.sp_model.get_piece_size() def snake_case__( self : int ) ->Union[str, Any]: snake_case_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) ->Any: snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : str , _UpperCamelCase : List[Any] ) ->List[str]: snake_case_ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__( self : Optional[int] , _UpperCamelCase : str ) ->List[str]: return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) def snake_case__( self : str , _UpperCamelCase : List[str] ) ->Tuple: return self.sp_model.piece_to_id(_UpperCamelCase ) def snake_case__( self : Union[str, Any] , _UpperCamelCase : str ) ->List[Any]: snake_case_ = self.sp_model.IdToPiece(_UpperCamelCase ) return token def snake_case__( self : Dict , _UpperCamelCase : Optional[int] ) ->List[str]: snake_case_ = [] snake_case_ = '''''' snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCamelCase ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(_UpperCamelCase ) snake_case_ = False out_string += self.sp_model.decode(_UpperCamelCase ) return out_string.strip() def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : bool = False , _UpperCamelCase : bool = None , _UpperCamelCase : bool = True , **_UpperCamelCase : List[str] , ) ->str: snake_case_ = kwargs.pop('''use_source_tokenizer''' , _UpperCamelCase ) snake_case_ = self.convert_ids_to_tokens(_UpperCamelCase , skip_special_tokens=_UpperCamelCase ) # To avoid mixing byte-level and unicode for byte-level BPE # we need to build the string separately for added tokens and byte-level tokens # cf.
https://github.com/huggingface/transformers/issues/1133 snake_case_ = [] snake_case_ = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) snake_case_ = [] sub_texts.append(_UpperCamelCase ) else: current_sub_text.append(_UpperCamelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: snake_case_ = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(_UpperCamelCase ) ) else: snake_case_ = ''''''.join(_UpperCamelCase ) snake_case_ = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: snake_case_ = self.clean_up_tokenization(_UpperCamelCase ) return clean_text else: return text def snake_case__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , '''wb''' ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,) def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def snake_case__( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase )) + [1] return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1] def snake_case__( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
8
1
lowerCAmelCase_ = 6_55_21 def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = 1 snake_case_ = 0 for plain_chr in plain_text: snake_case_ = (a + ord(SCREAMING_SNAKE_CASE__ )) % MOD_ADLER snake_case_ = (b + a) % MOD_ADLER return (b << 16) | a
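# The stdlib ships the same checksum, so a byte-oriented copy of the routine
# above (which works on str via ord()) can be cross-checked against zlib;
# the modulus is the prime 65521:
import zlib

def adler32(data: bytes) -> int:
    a, b = 1, 0
    for byte in data:
        a = (a + byte) % 65521
        b = (b + a) % 65521
    return (b << 16) | a

assert adler32(b"Wikipedia") == zlib.adler32(b"Wikipedia")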
8
from __future__ import annotations from collections.abc import Generator def __SCREAMING_SNAKE_CASE (): snake_case_ = {} snake_case_ = 2 while True: snake_case_ = factor_map.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if factor: snake_case_ = factor + prime while x in factor_map: x += factor snake_case_ = factor else: snake_case_ = prime yield prime prime += 1 def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 1E10 ): snake_case_ = sieve() snake_case_ = 1 while True: snake_case_ = next(SCREAMING_SNAKE_CASE__ ) if (2 * prime * n) > limit: return n # Ignore the next prime as the remainder will be 2. next(SCREAMING_SNAKE_CASE__ ) n += 2 if __name__ == "__main__": print(solution())
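# The generator above is an incremental (unbounded) Sieve of Eratosthenes:
# each composite is crossed off lazily via a map from "next composite" to one
# of its prime factors. A standalone copy with readable names restored:
from itertools import islice

def primes():
    factor_map = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:                         # prime is composite: reschedule factor
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:                              # first time seen: it is prime
            factor_map[prime * prime] = prime
            yield prime
        prime += 1

print(list(islice(primes(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]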
8
1
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowerCAmelCase_ = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = set() snake_case_ = [] def parse_line(SCREAMING_SNAKE_CASE__ ): for line in fp: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(SCREAMING_SNAKE_CASE__ ) > 0: snake_case_ = '''\n'''.join(SCREAMING_SNAKE_CASE__ ) # Only keep the warnings specified in `targets` if any(F''': {x}: ''' in warning for x in targets ): selected_warnings.add(SCREAMING_SNAKE_CASE__ ) buffer.clear() continue else: snake_case_ = line.strip() buffer.append(SCREAMING_SNAKE_CASE__ ) if from_gh: for filename in os.listdir(SCREAMING_SNAKE_CASE__ ): snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): # read the file if filename != "warnings.txt": continue with open(SCREAMING_SNAKE_CASE__ ) as fp: parse_line(SCREAMING_SNAKE_CASE__ ) else: try: with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ ) as z: for filename in z.namelist(): if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): # read the file if filename != "warnings.txt": continue with z.open(SCREAMING_SNAKE_CASE__ ) as fp: parse_line(SCREAMING_SNAKE_CASE__ ) except Exception: logger.warning( F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' ) return selected_warnings def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = set() snake_case_ = [os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for p in os.listdir(SCREAMING_SNAKE_CASE__ ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) return selected_warnings if __name__ == "__main__": def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): return values.split(''',''' ) lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''') parser.add_argument( '''--output_dir''', type=str, required=True, help='''Where to store the downloaded artifacts and other result files.''', ) parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''') # optional parameters parser.add_argument( '''--targets''', default='''DeprecationWarning,UserWarning,FutureWarning''', type=list_str, help='''Comma-separated list of target warning(s) which we want to extract.''', ) parser.add_argument( '''--from_gh''', action='''store_true''', help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''', ) lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowerCAmelCase_ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp: 
json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print('''=''' * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowerCAmelCase_ = extract_warnings(args.output_dir, args.targets) lowerCAmelCase_ = sorted(selected_warnings) with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
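# The filter used above keeps a warning block only when one of the target
# categories appears as ": <Category>: " inside it, e.g.:
targets = ["DeprecationWarning", "UserWarning", "FutureWarning"]
warning = "utils.py:10: UserWarning: `foo` is deprecated"
assert any(f": {t}: " in warning for t in targets)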
8
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OPTForCausalLM''', '''OPTModel''', '''OPTPreTrainedModel''', '''OPTForSequenceClassification''', '''OPTForQuestionAnswering''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''FlaxOPTForCausalLM''', '''FlaxOPTModel''', '''FlaxOPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
8
1
import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right lowerCAmelCase_ = 25_60_47 lowerCAmelCase_ = 25_61_45 @require_sentencepiece @require_tokenizers class snake_case_ ( __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = NllbTokenizer SCREAMING_SNAKE_CASE : Any = NllbTokenizerFast SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : Union[str, Any] = {} def snake_case__( self : List[str] ) ->Union[str, Any]: super().setUp() # We have a SentencePiece fixture for testing snake_case_ = NllbTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__( self : List[str] ) ->int: snake_case_ = NllbTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase ) snake_case_ = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_UpperCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) snake_case_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _UpperCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) snake_case_ = tokenizer.convert_tokens_to_ids(_UpperCamelCase ) self.assertListEqual( _UpperCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) snake_case_ = tokenizer.convert_ids_to_tokens(_UpperCamelCase ) self.assertListEqual( _UpperCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def snake_case__( self : List[str] ) ->Any: snake_case_ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase ) snake_case_ = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase ) snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(_UpperCamelCase ) snake_case_ = tokenizer_p.save_pretrained(_UpperCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one 
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) snake_case_ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_UpperCamelCase , _UpperCamelCase ) # Checks everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(_UpperCamelCase ) snake_case_ = tokenizer_p.from_pretrained(_UpperCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) ) shutil.rmtree(_UpperCamelCase ) # Save tokenizer rust, legacy_format=True snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(_UpperCamelCase , legacy_format=_UpperCamelCase ) snake_case_ = tokenizer_p.save_pretrained(_UpperCamelCase ) # Checks it save with the same files self.assertSequenceEqual(_UpperCamelCase , _UpperCamelCase ) # Checks everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(_UpperCamelCase ) snake_case_ = tokenizer_p.from_pretrained(_UpperCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) ) shutil.rmtree(_UpperCamelCase ) # Save tokenizer rust, legacy_format=False snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(_UpperCamelCase , legacy_format=_UpperCamelCase ) snake_case_ = tokenizer_p.save_pretrained(_UpperCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(_UpperCamelCase ) snake_case_ = tokenizer_p.from_pretrained(_UpperCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) ) shutil.rmtree(_UpperCamelCase ) @require_torch def snake_case__( self : Any ) ->Optional[Any]: if not self.test_seqaseq: return snake_case_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Longer text that will definitely require truncation. 
snake_case_ = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for''' ''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons''' ''' will only worsen the violence and misery for millions of people.''', ] snake_case_ = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al''' ''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi''' ''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] try: snake_case_ = tokenizer.prepare_seqaseq_batch( src_texts=_UpperCamelCase , tgt_texts=_UpperCamelCase , max_length=3 , max_target_length=1_0 , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 1_0 ) # max_target_length will default to max_length if not specified snake_case_ = tokenizer.prepare_seqaseq_batch( _UpperCamelCase , tgt_texts=_UpperCamelCase , max_length=3 , return_tensors='''pt''' ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) snake_case_ = tokenizer.prepare_seqaseq_batch( src_texts=_UpperCamelCase , max_length=3 , max_target_length=1_0 , return_tensors='''pt''' ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn('''decoder_input_ids''' , _UpperCamelCase ) @unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' ) def snake_case__( self : List[str] ) ->Tuple: pass def snake_case__( self : Any ) ->Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = [AddedToken('''<special>''' , lstrip=_UpperCamelCase )] snake_case_ = self.rust_tokenizer_class.from_pretrained( _UpperCamelCase , additional_special_tokens=_UpperCamelCase , **_UpperCamelCase ) snake_case_ = tokenizer_r.encode('''Hey this is a <special> token''' ) snake_case_ = tokenizer_r.encode('''<special>''' , add_special_tokens=_UpperCamelCase )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: snake_case_ = self.rust_tokenizer_class.from_pretrained( _UpperCamelCase , additional_special_tokens=_UpperCamelCase , **_UpperCamelCase , ) snake_case_ = self.tokenizer_class.from_pretrained( _UpperCamelCase , additional_special_tokens=_UpperCamelCase , **_UpperCamelCase ) snake_case_ = tokenizer_p.encode('''Hey this is a <special> token''' ) snake_case_ = tokenizer_cr.encode('''Hey this is a <special> token''' ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class snake_case_ ( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = "facebook/nllb-200-distilled-600M" SCREAMING_SNAKE_CASE : int = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year 
conflict and more weapons will only worsen the violence and misery for millions of people.", ] SCREAMING_SNAKE_CASE : Dict = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] SCREAMING_SNAKE_CASE : Dict = [ 256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 2, ] @classmethod def snake_case__( cls : Optional[int] ) ->int: snake_case_ = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' ) snake_case_ = 1 return cls def snake_case__( self : Optional[int] ) ->Any: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 2_5_6_0_0_1 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 2_5_6_0_0_2 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 2_5_6_0_5_7 ) def snake_case__( self : str ) ->Tuple: snake_case_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _UpperCamelCase ) def snake_case__( self : Any ) ->List[str]: self.assertIn(_UpperCamelCase , self.tokenizer.all_special_ids ) # fmt: off snake_case_ = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7] # fmt: on snake_case_ = self.tokenizer.decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase ) snake_case_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertNotIn(self.tokenizer.eos_token , _UpperCamelCase ) def snake_case__( self : Dict ) ->str: snake_case_ = ['''this is gunna be a long sentence ''' * 2_0] assert isinstance(src_text[0] , _UpperCamelCase ) snake_case_ = 1_0 snake_case_ = self.tokenizer(_UpperCamelCase , max_length=_UpperCamelCase , truncation=_UpperCamelCase ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , _UpperCamelCase ) self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase ) def snake_case__( self : Tuple ) ->Union[str, Any]: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_6_2_0_3, 3] ) def snake_case__( self : Dict ) ->List[Any]: snake_case_ = tempfile.mkdtemp() snake_case_ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_UpperCamelCase ) snake_case_ = NllbTokenizer.from_pretrained(_UpperCamelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCamelCase ) @require_torch def snake_case__( self : str ) ->Dict: snake_case_ = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) snake_case_ = shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['''ron_Latn'''] ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) self.assertEqual((2, 1_5) , batch.input_ids.shape ) self.assertEqual((2, 1_5) , batch.attention_mask.shape ) snake_case_ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , batch.decoder_input_ids[0, 0] ) # 
EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def snake_case__( self : List[Any] ) ->Any: snake_case_ = self.tokenizer(self.src_text , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=3 , return_tensors='''pt''' ) snake_case_ = self.tokenizer( text_target=self.tgt_text , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=1_0 , return_tensors='''pt''' ) snake_case_ = targets['''input_ids'''] snake_case_ = shift_tokens_right( _UpperCamelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 ) @require_torch def snake_case__( self : Optional[int] ) ->Union[str, Any]: snake_case_ = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' ) self.assertEqual( nested_simplify(_UpperCamelCase ) , { # A, test, EOS, en_XX '''input_ids''': [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 2_5_6_0_5_7, } , ) @require_torch def snake_case__( self : List[str] ) ->str: snake_case_ = True snake_case_ = self.tokenizer( '''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' ) self.assertEqual( inputs.input_ids , [1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7] ) snake_case_ = False snake_case_ = self.tokenizer( '''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' ) self.assertEqual( inputs.input_ids , [2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2] )
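# Usage sketch of the language-code mechanics the tests above exercise (added
# for illustration; downloads the "facebook/nllb-200-distilled-600M" checkpoint):
from transformers import NllbTokenizer

nllb_tok = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
)
encoded = nllb_tok("UN Chief says there is no military solution in Syria")
# With the default (non-legacy) behaviour, input_ids start with the source
# language code (256047 for eng_Latn) and end with the </s> token (id 2).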
8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = "philschmid/bart-large-cnn-samsum" SCREAMING_SNAKE_CASE : Tuple = ( "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, " "and returns a summary of the text." ) SCREAMING_SNAKE_CASE : str = "summarizer" SCREAMING_SNAKE_CASE : str = AutoTokenizer SCREAMING_SNAKE_CASE : str = AutoModelForSeqaSeqLM SCREAMING_SNAKE_CASE : Optional[int] = ["text"] SCREAMING_SNAKE_CASE : Optional[int] = ["text"] def snake_case__( self : str , _UpperCamelCase : int ) ->Optional[int]: return self.pre_processor(_UpperCamelCase , return_tensors='''pt''' , truncation=_UpperCamelCase ) def snake_case__( self : Tuple , _UpperCamelCase : Optional[int] ) ->Tuple: return self.model.generate(**_UpperCamelCase )[0] def snake_case__( self : Optional[Any] , _UpperCamelCase : Optional[int] ) ->Any: return self.pre_processor.decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
8
1
from sklearn.metrics import mean_squared_error import datasets lowerCAmelCase_ = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' lowerCAmelCase_ = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' lowerCAmelCase_ = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. 
])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case_ ( datasets.Metric ): '''simple docstring''' def snake_case__( self : Optional[int] ) ->List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def snake_case__( self : List[Any] ) ->Optional[int]: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def snake_case__( self : int , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any=None , _UpperCamelCase : Optional[int]="uniform_average" , _UpperCamelCase : Tuple=True ) ->Tuple: snake_case_ = mean_squared_error( _UpperCamelCase , _UpperCamelCase , sample_weight=_UpperCamelCase , multioutput=_UpperCamelCase , squared=_UpperCamelCase ) return {"mse": mse}
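# For reference, a minimal sketch of the computation this metric delegates to
# sklearn for (added for illustration; uniform average over 1-D inputs assumed):
def _mse(predictions, references):
    return sum((p - r) ** 2 for p, r in zip(predictions, references)) / len(predictions)

assert _mse([2.5, 0.0, 2, 8], [3, -0.5, 2, 7]) == 0.375  # matches the docstring example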
8
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot holds a deque, so repeated inserts under one key are
        # chained instead of overwritten.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
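# Usage sketch (hypothetical, added for illustration; the HashTable base class
# lives in ./hash_table.py and is not shown here):
#
#     table = HashTableWithLinkedList(size_table=3)
#     table.insert_data(17)
#     table.insert_data(20)  # chained into a bucket's deque on collision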
8
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
8
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # Check for already visited and blocked points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # Mark as visited.
            solutions[i][j] = 1

            # Check all four directions.
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
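# Quick demonstration of the solver above (added for illustration):
# 0 marks a free cell, 1 a blocked one.
if __name__ == "__main__":
    demo_maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 0, 0],
    ]
    solve_maze(demo_maze)  # prints the 0/1 path matrix from (0, 0) to (2, 2)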
8
1
from __future__ import annotations

from math import gcd


def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``.
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``,
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky, or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
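# Worked example for the function above (added for illustration): 8051 = 83 * 97
# is the classic textbook instance of Pollard's rho, and with the defaults
# (seed=2, step=1, i.e. f(x) = (x**2 + 1) % num) the cycle detection recovers
# one of the two factors:
#
#     >>> pollard_rho(8051) in (83, 97)
#     True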
8
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    # Set the working precision so the Chudnovsky series is evaluated to
    # enough significant digits.
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi are: {pi(n)}")
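# Spot check for the Chudnovsky implementation above (added for illustration;
# the expected digits are from the well-known decimal expansion of pi):
#
#     >>> pi(10)
#     '3.14159265'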
8
1
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    # Set the working precision so the Chudnovsky series is evaluated to
    # enough significant digits.
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi are: {pi(n)}")
8
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class snake_case_ ( __A ): '''simple docstring''' def __init__( self : int , _UpperCamelCase : pyspark.sql.DataFrame , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : bool = True , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : str = None , _UpperCamelCase : bool = True , _UpperCamelCase : str = "arrow" , **_UpperCamelCase : Tuple , ) ->str: super().__init__( split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , **_UpperCamelCase , ) snake_case_ = load_from_cache_file snake_case_ = file_format snake_case_ = Spark( df=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , working_dir=_UpperCamelCase , **_UpperCamelCase , ) def snake_case__( self : int ) ->Tuple: if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) snake_case_ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=_UpperCamelCase , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
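# Usage sketch (added for illustration; assumes a local Spark session and uses
# hypothetical names, since the class above corresponds to the reader behind
# datasets.Dataset.from_spark in the upstream library):
#
#     from pyspark.sql import SparkSession
#     spark = SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#     ds = SparkDatasetReader(df, cache_dir="/tmp/spark_cache").read()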
8
1
import collections import os import re from pathlib import Path lowerCAmelCase_ = '''src/transformers''' # Matches is_xxx_available() lowerCAmelCase_ = re.compile(R'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} lowerCAmelCase_ = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowerCAmelCase_ = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available lowerCAmelCase_ = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") lowerCAmelCase_ = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowerCAmelCase_ = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", lowerCAmelCase_ = re.compile(R'''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], lowerCAmelCase_ = re.compile(R'''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo lowerCAmelCase_ = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: lowerCAmelCase_ = re.compile(R'''^\s*try:''') # Catches a line with else: lowerCAmelCase_ = re.compile(R'''^\s*else:''') def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): if _re_test_backend.search(SCREAMING_SNAKE_CASE__ ) is None: return None snake_case_ = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE__ )] backends.sort() return "_and_".join(SCREAMING_SNAKE_CASE__ ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): with open(SCREAMING_SNAKE_CASE__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: snake_case_ = f.readlines() snake_case_ = 0 while line_index < len(SCREAMING_SNAKE_CASE__ ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(SCREAMING_SNAKE_CASE__ ): return None # First grab the objects without a specific backend in _import_structure snake_case_ = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: snake_case_ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE__ ): snake_case_ = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE__ ).groups()[0] snake_case_ = re.findall(R'''\[([^\]]+)\]''' , SCREAMING_SNAKE_CASE__ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue snake_case_ = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE__ ) if single_line_import_search is not None: snake_case_ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(SCREAMING_SNAKE_CASE__ ) > 0] objects.extend(SCREAMING_SNAKE_CASE__ ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 snake_case_ = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
snake_case_ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): snake_case_ = lines[line_index] if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE__ ) is not None: objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE__ ).groups()[0] ) elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE__ ) is not None: snake_case_ = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE__ ).groups()[0].split(''', ''' ) snake_case_ = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE__ ) > 0] objects.extend(SCREAMING_SNAKE_CASE__ ) elif _re_between_brackets.search(SCREAMING_SNAKE_CASE__ ) is not None: snake_case_ = _re_between_brackets.search(SCREAMING_SNAKE_CASE__ ).groups()[0].split(''', ''' ) snake_case_ = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE__ ) > 0] objects.extend(SCREAMING_SNAKE_CASE__ ) elif _re_quote_object.search(SCREAMING_SNAKE_CASE__ ) is not None: objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE__ ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 snake_case_ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend snake_case_ = [] while ( line_index < len(SCREAMING_SNAKE_CASE__ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): snake_case_ = lines[line_index] snake_case_ = _re_import.search(SCREAMING_SNAKE_CASE__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 snake_case_ = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(SCREAMING_SNAKE_CASE__ ): # If the line is an if is_backend_available, we grab all objects associated. 
snake_case_ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): snake_case_ = lines[line_index] snake_case_ = _re_import.search(SCREAMING_SNAKE_CASE__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 snake_case_ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): def find_duplicates(SCREAMING_SNAKE_CASE__ ): return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE__ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] snake_case_ = [] for key in import_dict_objects.keys(): snake_case_ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) snake_case_ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): snake_case_ = '''base imports''' if key == '''none''' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def __SCREAMING_SNAKE_CASE (): snake_case_ = [] for root, _, files in os.walk(SCREAMING_SNAKE_CASE__ ): if "__init__.py" in files: snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''__init__.py''' ) snake_case_ = parse_init(SCREAMING_SNAKE_CASE__ ) if objects is not None: snake_case_ = analyze_results(*SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) > 0: snake_case_ = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(SCREAMING_SNAKE_CASE__ ) ) if len(SCREAMING_SNAKE_CASE__ ) > 0: raise ValueError('''\n\n'''.join(SCREAMING_SNAKE_CASE__ ) ) def __SCREAMING_SNAKE_CASE (): snake_case_ = [] for path, directories, files in os.walk(SCREAMING_SNAKE_CASE__ ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(SCREAMING_SNAKE_CASE__ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(SCREAMING_SNAKE_CASE__ ) / folder).glob('''*.py''' ) ) ) == 0: continue snake_case_ = str((Path(SCREAMING_SNAKE_CASE__ ) / folder).relative_to(SCREAMING_SNAKE_CASE__ ) ) snake_case_ = short_path.replace(os.path.sep , '''.''' ) submodules.append(SCREAMING_SNAKE_CASE__ ) for fname in files: if fname == "__init__.py": continue snake_case_ = str((Path(SCREAMING_SNAKE_CASE__ ) / fname).relative_to(SCREAMING_SNAKE_CASE__ ) ) snake_case_ = short_path.replace('''.py''' , 
'''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(SCREAMING_SNAKE_CASE__ ) return submodules lowerCAmelCase_ = [ '''convert_pytorch_checkpoint_to_tf2''', '''modeling_flax_pytorch_utils''', '''models.esm.openfold_utils''', ] def __SCREAMING_SNAKE_CASE (): # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import snake_case_ = direct_transformers_import(SCREAMING_SNAKE_CASE__ ) snake_case_ = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentially re-) add them. with open(os.path.join(SCREAMING_SNAKE_CASE__ , '''__init__.py''' ) , '''r''' ) as f: snake_case_ = f.read() import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , SCREAMING_SNAKE_CASE__ ) ) ) snake_case_ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(SCREAMING_SNAKE_CASE__ ) > 0: snake_case_ = '''\n'''.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' F'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
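# For reference, a minimal __init__.py shaped the way the checker above expects
# (illustrative names): the _import_structure keys and values must mirror the
# TYPE_CHECKING imports exactly, including any backend-guarded blocks.
#
#     _import_structure = {"configuration_foo": ["FooConfig"]}
#     if TYPE_CHECKING:
#         from .configuration_foo import FooConfig
#     else:
#         import sys
#         sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)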
8
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
8
1
import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): return int(x / 2**20 ) class snake_case_ : '''simple docstring''' def __enter__( self : Any ) ->Optional[Any]: gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero snake_case_ = torch.cuda.memory_allocated() return self def __exit__( self : List[str] , *_UpperCamelCase : str ) ->List[Any]: gc.collect() torch.cuda.empty_cache() snake_case_ = torch.cuda.memory_allocated() snake_case_ = torch.cuda.max_memory_allocated() snake_case_ = bamb(self.end - self.begin ) snake_case_ = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 16 , SCREAMING_SNAKE_CASE__ = "bert-base-cased" , SCREAMING_SNAKE_CASE__ = 320 , SCREAMING_SNAKE_CASE__ = 160 , ): snake_case_ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) snake_case_ = load_dataset( '''glue''' , '''mrpc''' , split={'''train''': F'''train[:{n_train}]''', '''validation''': F'''validation[:{n_val}]'''} ) def tokenize_function(SCREAMING_SNAKE_CASE__ ): # max_length=None => use the model max length (it's actually the default) snake_case_ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset snake_case_ = datasets.map( SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=SCREAMING_SNAKE_CASE__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case_ = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(SCREAMING_SNAKE_CASE__ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
snake_case_ = DataLoader( tokenized_datasets['''train'''] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) snake_case_ = DataLoader( tokenized_datasets['''validation'''] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) return train_dataloader, eval_dataloader def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # Initialize accelerator snake_case_ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case_ = config['''lr'''] snake_case_ = int(config['''num_epochs'''] ) snake_case_ = int(config['''seed'''] ) snake_case_ = int(config['''batch_size'''] ) snake_case_ = args.model_name_or_path set_seed(SCREAMING_SNAKE_CASE__ ) snake_case_, snake_case_ = get_dataloaders(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case_ = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ ) # Instantiate optimizer snake_case_ = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) snake_case_ = optimizer_cls(params=model.parameters() , lr=SCREAMING_SNAKE_CASE__ ) if accelerator.state.deepspeed_plugin is not None: snake_case_ = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: snake_case_ = 1 snake_case_ = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): snake_case_ = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE__ , num_warmup_steps=0 , num_training_steps=SCREAMING_SNAKE_CASE__ , ) else: snake_case_ = DummyScheduler(SCREAMING_SNAKE_CASE__ , total_num_steps=SCREAMING_SNAKE_CASE__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ = accelerator.prepare( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # We need to keep track of how many total steps we have iterated over snake_case_ = 0 # We also need to keep track of the stating epoch so files are named properly snake_case_ = 0 # Now we train the model snake_case_ = {} for epoch in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): snake_case_ = model(**SCREAMING_SNAKE_CASE__ ) snake_case_ = outputs.loss snake_case_ = loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) ) accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) ) accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) ) accelerator.print( '''Total Peak Memory consumed during the train (max): {}'''.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) snake_case_ = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[F'''epoch-{epoch}'''] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __SCREAMING_SNAKE_CASE (): snake_case_ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=SCREAMING_SNAKE_CASE__ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=SCREAMING_SNAKE_CASE__ , ) parser.add_argument( '''--output_dir''' , type=SCREAMING_SNAKE_CASE__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , ) parser.add_argument( '''--peak_memory_upper_bound''' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , ) parser.add_argument( '''--n_train''' , type=SCREAMING_SNAKE_CASE__ , default=320 , help='''Number of training examples to use.''' , ) parser.add_argument( '''--n_val''' , type=SCREAMING_SNAKE_CASE__ , default=160 , help='''Number of validation examples to use.''' , ) parser.add_argument( '''--num_epochs''' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='''Number of train epochs.''' , ) snake_case_ = parser.parse_args() snake_case_ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
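# Standalone sketch of the TorchTracemalloc helper defined above (added for
# illustration; requires a CUDA device, and the attribute names follow the
# upstream accelerate example script rather than the obfuscated assignments):
#
#     with TorchTracemalloc() as tracker:
#         ...  # run some training steps
#     print(f"used {tracker.used} MB, peaked {tracker.peaked} MB")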
8
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase_ = { '''vocab_file''': { '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''', }, '''tokenizer_file''': { '''unc-nlp/lxmert-base-uncased''': ( '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase_ = { '''unc-nlp/lxmert-base-uncased''': 5_12, } lowerCAmelCase_ = { '''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True}, } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : Any = LxmertTokenizer def __init__( self : Union[str, Any] , _UpperCamelCase : int=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Dict=True , _UpperCamelCase : Any="[UNK]" , _UpperCamelCase : Tuple="[SEP]" , _UpperCamelCase : List[Any]="[PAD]" , _UpperCamelCase : Union[str, Any]="[CLS]" , _UpperCamelCase : str="[MASK]" , _UpperCamelCase : List[str]=True , _UpperCamelCase : List[str]=None , **_UpperCamelCase : List[str] , ) ->Any: super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , ) snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _UpperCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _UpperCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _UpperCamelCase ) != tokenize_chinese_chars ): snake_case_ = getattr(_UpperCamelCase , normalizer_state.pop('''type''' ) ) snake_case_ = do_lower_case snake_case_ = strip_accents snake_case_ = tokenize_chinese_chars snake_case_ = normalizer_class(**_UpperCamelCase ) snake_case_ = do_lower_case def snake_case__( self : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=None ) ->List[Any]: snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case__( self : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: snake_case_ = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase ) return tuple(_UpperCamelCase )
8
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''', '''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''', # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = "xlm-roberta-xl" def __init__( self : str , _UpperCamelCase : Union[str, Any]=2_5_0_8_8_0 , _UpperCamelCase : List[Any]=2_5_6_0 , _UpperCamelCase : Any=3_6 , _UpperCamelCase : Dict=3_2 , _UpperCamelCase : Optional[int]=1_0_2_4_0 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : Union[str, Any]=5_1_4 , _UpperCamelCase : Dict=1 , _UpperCamelCase : int=0.02 , _UpperCamelCase : List[str]=1e-05 , _UpperCamelCase : Dict=1 , _UpperCamelCase : List[str]=0 , _UpperCamelCase : str=2 , _UpperCamelCase : Dict="absolute" , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : Dict=None , **_UpperCamelCase : List[Any] , ) ->Union[str, Any]: super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = hidden_act snake_case_ = intermediate_size snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = position_embedding_type snake_case_ = use_cache snake_case_ = classifier_dropout class snake_case_ ( __A ): '''simple docstring''' @property def snake_case__( self : List[str] ) ->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: snake_case_ = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
8
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
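# Added sanity check for the Project Euler solution above: the 6th prime is 13
# (given in the problem statement), and the default argument answers problem 7
# (the 10001st prime, 104743).
if __name__ == "__main__":
    assert solution(6) == 13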
8
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model'''} lowerCAmelCase_ = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, } lowerCAmelCase_ = { '''moussaKam/mbarthez''': 10_24, '''moussaKam/barthez''': 10_24, '''moussaKam/barthez-orangesum-title''': 10_24, } lowerCAmelCase_ = '''▁''' class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Any="<s>" , _UpperCamelCase : Dict="</s>" , _UpperCamelCase : List[str]="</s>" , _UpperCamelCase : List[str]="<s>" , _UpperCamelCase : List[Any]="<unk>" , _UpperCamelCase : Optional[int]="<pad>" , _UpperCamelCase : Optional[int]="<mask>" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Optional[int] , ) ->None: # Mask token behave like a normal word, i.e. include the space before it snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) snake_case_ = vocab_file snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCamelCase ) ) snake_case_ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} snake_case_ = len(self.sp_model ) - 1 snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case__( self : Optional[int] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase )) + [1] return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase )) + [1] def snake_case__( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is 
None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case__( self : Union[str, Any] ) ->Union[str, Any]: return len(self.sp_model ) def snake_case__( self : Dict ) ->str: snake_case_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case__( self : Optional[int] , _UpperCamelCase : str ) ->List[str]: return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) def snake_case__( self : List[Any] , _UpperCamelCase : Tuple ) ->str: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] snake_case_ = self.sp_model.PieceToId(_UpperCamelCase ) return spm_id if spm_id else self.unk_token_id def snake_case__( self : Optional[Any] , _UpperCamelCase : Optional[Any] ) ->Dict: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(_UpperCamelCase ) def snake_case__( self : Any , _UpperCamelCase : List[str] ) ->Dict: snake_case_ = [] snake_case_ = '''''' snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCamelCase ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(_UpperCamelCase ) snake_case_ = False out_string += self.sp_model.decode(_UpperCamelCase ) return out_string.strip() def __getstate__( self : Any ) ->Optional[int]: snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : Tuple , _UpperCamelCase : Dict ) ->str: snake_case_ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , '''wb''' ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,)
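For reference, a minimal sketch of the special-token layout this tokenizer builds, i.e. <s> A </s> for single sequences and <s> A </s></s> B </s> for pairs; the id values below are placeholders, not entries from a real SentencePiece vocabulary:

# Minimal sketch of the layouts built above; CLS_ID/SEP_ID and the token ids
# are placeholder values, not real vocabulary entries.
CLS_ID, SEP_ID = 0, 2


def build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS_ID] + ids_a + [SEP_ID]  # <s> A </s>
    return [CLS_ID] + ids_a + [SEP_ID, SEP_ID] + ids_b + [SEP_ID]  # <s> A </s></s> B </s>


assert build_inputs([5, 6]) == [0, 5, 6, 2]
assert build_inputs([5], [6]) == [0, 5, 2, 2, 6, 2]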
8
from sklearn.metrics import mean_squared_error

import datasets


_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
        "raw_values" : Returns a full set of errors in case of multioutput input.
        "uniform_average" : Errors of all outputs are averaged with uniform weight.
    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.

Examples:

    >>> mse_metric = datasets.load_metric("mse")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}

    If you're using multi-dimensional lists, then set the config as follows :

    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
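The figures quoted in the docstring can be verified by hand; a quick sklearn-free check:

# Sanity check of the docstring numbers without sklearn: MSE is the mean of
# squared residuals, RMSE its square root.
from math import sqrt

predictions = [2.5, 0.0, 2, 8]
references = [3, -0.5, 2, 7]
mse = sum((p - r) ** 2 for p, r in zip(predictions, references)) / len(references)
assert mse == 0.375
assert abs(sqrt(mse) - 0.6123724356957945) < 1e-12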
8
1
speed_chart = {"km/h": 1.0, "m/s": 3.6, "mph": 1.609344, "knot": 1.852}
speed_chart_inverse = {"km/h": 1.0, "m/s": 0.277777778, "mph": 0.621371192, "knot": 0.539956803}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
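The conversion routes through km/h as a hub unit: speed_chart scales into km/h and speed_chart_inverse scales back out. A worked example:

# 100 mph = 100 * 1.609344 = 160.9344 km/h, then * 0.277777778 ≈ 44.704 m/s.
assert convert_speed(100, "mph", "km/h") == 160.934
assert convert_speed(100, "mph", "m/s") == 44.704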
8
def permute(nums):
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in the permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
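Both implementations should agree with the standard library up to ordering; a small cross-check:

# Cross-check the two hand-rolled versions against itertools.permutations.
from itertools import permutations as it_permutations

expected = {tuple(p) for p in it_permutations([1, 2, 3])}
assert {tuple(p) for p in permute([1, 2, 3])} == expected
assert {tuple(p) for p in permute2([1, 2, 3])} == expected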
8
1
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
8
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
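The _LazyModule indirection above defers the heavy framework imports until an attribute is first touched. A stripped-down sketch of the same idea via module-level __getattr__ (PEP 562); this is a toy illustration, not the transformers implementation:

# Toy sketch of lazy importing with module-level __getattr__ (PEP 562);
# _import_structure here is a stand-in mapping, not the real one.
import importlib

_import_structure = {"json": ["dumps", "loads"]}  # submodule -> exported names


def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")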
8
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = "roformer" def __init__( self : Optional[int] , _UpperCamelCase : int=5_0_0_0_0 , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : int=7_6_8 , _UpperCamelCase : Dict=1_2 , _UpperCamelCase : Optional[int]=1_2 , _UpperCamelCase : Dict=3_0_7_2 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : Dict=1_5_3_6 , _UpperCamelCase : Tuple=2 , _UpperCamelCase : int=0.02 , _UpperCamelCase : Any=1e-12 , _UpperCamelCase : Any=0 , _UpperCamelCase : Any=False , _UpperCamelCase : Dict=True , **_UpperCamelCase : List[Any] , ) ->str: super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase ) snake_case_ = vocab_size snake_case_ = hidden_size if embedding_size is None else embedding_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = hidden_act snake_case_ = intermediate_size snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = rotary_value snake_case_ = use_cache class snake_case_ ( __A ): '''simple docstring''' @property def snake_case__( self : List[Any] ) ->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: snake_case_ = {0: '''batch''', 1: '''sequence'''} snake_case_ = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
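The rotary_value flag points at RoFormer's distinguishing feature, rotary position embeddings, which rotate each pair of feature dimensions by a position-dependent angle (and, when the flag is set, apply the same rotation to the value projections as well). A minimal NumPy sketch of the rotation itself, illustrative only and not the modeling code:

import numpy as np


def rotary_embed(x: np.ndarray, base: float = 10000.0) -> np.ndarray:
    # x: (seq_len, dim) with even dim; each pair (x[2i], x[2i+1]) is rotated
    # by theta_i = pos / base ** (2i / dim), as in the RoFormer paper.
    seq_len, dim = x.shape
    pos = np.arange(seq_len)[:, None]
    inv_freq = 1.0 / base ** (np.arange(0, dim, 2) / dim)
    angles = pos * inv_freq  # (seq_len, dim // 2)
    cos, sin = np.cos(angles), np.sin(angles)
    x1, x2 = x[:, 0::2], x[:, 1::2]
    out = np.empty_like(x)
    out[:, 0::2] = x1 * cos - x2 * sin
    out[:, 1::2] = x1 * sin + x2 * cos
    return out


q = np.random.randn(8, 64)
assert rotary_embed(q).shape == (8, 64)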
8
from ..utils import DummyObject, requires_backends class snake_case_ ( metaclass=__A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = ["note_seq"] def __init__( self : Optional[int] , *_UpperCamelCase : str , **_UpperCamelCase : Optional[int] ) ->Any: requires_backends(self , ['''note_seq'''] ) @classmethod def snake_case__( cls : int , *_UpperCamelCase : Any , **_UpperCamelCase : List[Any] ) ->int: requires_backends(cls , ['''note_seq'''] ) @classmethod def snake_case__( cls : Dict , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Union[str, Any] ) ->List[str]: requires_backends(cls , ['''note_seq'''] )
8
1
from math import factorial


def solution(n: int = 100) -> int:
    """Return the sum of the digits in n!."""
    return sum(int(x) for x in str(factorial(n)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
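A worked example makes the digit-sum explicit:

# 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27; for the default n=100
# the well-known result is 648.
assert solution(10) == 27
assert solution(100) == 648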
8
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = "vit_msn" def __init__( self : Dict , _UpperCamelCase : Optional[int]=7_6_8 , _UpperCamelCase : Optional[Any]=1_2 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : str=3_0_7_2 , _UpperCamelCase : Tuple="gelu" , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : List[str]=0.02 , _UpperCamelCase : List[Any]=1e-06 , _UpperCamelCase : Any=2_2_4 , _UpperCamelCase : Optional[Any]=1_6 , _UpperCamelCase : Any=3 , _UpperCamelCase : str=True , **_UpperCamelCase : Any , ) ->int: super().__init__(**_UpperCamelCase ) snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = qkv_bias
8
1
import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = os.path.abspath(SCREAMING_SNAKE_CASE__ ) logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' ) # Load weights from TF model snake_case_ = tf.train.list_variables(SCREAMING_SNAKE_CASE__ ) snake_case_ = [] snake_case_ = [] snake_case_ = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") snake_case_ = full_name.split('''/''' ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(F'''Skipping non-model layer {full_name}''' ) continue if "optimizer" in full_name: logger.info(F'''Skipping optimization layer {full_name}''' ) continue if name[0] == "model": # ignore initial 'model' snake_case_ = name[1:] # figure out how many levels deep the name is snake_case_ = 0 for _name in name: if _name.startswith('''layer_with_weights''' ): depth += 1 else: break layer_depth.append(SCREAMING_SNAKE_CASE__ ) # read data snake_case_ = tf.train.load_variable(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) names.append('''/'''.join(SCREAMING_SNAKE_CASE__ ) ) arrays.append(SCREAMING_SNAKE_CASE__ ) logger.info(F'''Read a total of {len(SCREAMING_SNAKE_CASE__ ):,} layers''' ) # Sanity check if len(set(SCREAMING_SNAKE_CASE__ ) ) != 1: raise ValueError(F'''Found layer names with different depths (layer depth {list(set(SCREAMING_SNAKE_CASE__ ) )})''' ) snake_case_ = list(set(SCREAMING_SNAKE_CASE__ ) )[0] if layer_depth != 1: raise ValueError( '''The model contains more than just the embedding/encoder layers. 
This script does not handle MLM/NSP''' ''' heads.''' ) # convert layers logger.info('''Converting weights...''' ) for full_name, array in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = full_name.split('''/''' ) snake_case_ = model snake_case_ = [] for i, m_name in enumerate(SCREAMING_SNAKE_CASE__ ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith('''layer_with_weights''' ): snake_case_ = int(m_name.split('''-''' )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(['''embeddings''', '''LayerNorm'''] ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''embeddings''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''LayerNorm''' ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''encoder''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''layer''' ) snake_case_ = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(['''pooler''', '''dense'''] ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''pooler''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''dense''' ) elif m_name == "embeddings": trace.append('''embeddings''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''embeddings''' ) if layer_num == 0: trace.append('''word_embeddings''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''word_embeddings''' ) elif layer_num == 1: trace.append('''position_embeddings''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''position_embeddings''' ) elif layer_num == 2: trace.append('''token_type_embeddings''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''token_type_embeddings''' ) else: raise ValueError(F'''Unknown embedding layer with name {full_name}''' ) trace.append('''weight''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''weight''' ) elif m_name == "_attention_layer": # self-attention layer trace.extend(['''attention''', '''self'''] ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''attention''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''self''' ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(['''attention''', '''output''', '''LayerNorm'''] ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''attention''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''output''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''LayerNorm''' ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(['''attention''', '''output''', '''dense'''] ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''attention''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''output''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''dense''' ) elif m_name == "_output_dense": # output dense trace.extend(['''output''', '''dense'''] ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''output''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''dense''' ) elif m_name == "_output_layer_norm": # output dense trace.extend(['''output''', '''LayerNorm'''] ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''output''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''LayerNorm''' ) elif m_name == "_key_dense": # attention key trace.append('''key''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''key''' ) elif 
m_name == "_query_dense": # attention query trace.append('''query''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''query''' ) elif m_name == "_value_dense": # attention value trace.append('''value''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''value''' ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(['''intermediate''', '''dense'''] ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''intermediate''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''dense''' ) elif m_name == "_output_layer_norm": # output layer norm trace.append('''output''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''output''' ) # weights & biases elif m_name in ["bias", "beta"]: trace.append('''bias''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''bias''' ) elif m_name in ["kernel", "gamma"]: trace.append('''weight''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''weight''' ) else: logger.warning(F'''Ignored {m_name}''' ) # for certain layers reshape is necessary snake_case_ = '''.'''.join(SCREAMING_SNAKE_CASE__ ) if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , SCREAMING_SNAKE_CASE__ ) or re.match( R'''(\S+)\.attention\.output\.dense\.weight''' , SCREAMING_SNAKE_CASE__ ): snake_case_ = array.reshape(pointer.data.shape ) if "kernel" in full_name: snake_case_ = array.transpose() if pointer.shape == array.shape: snake_case_ = torch.from_numpy(SCREAMING_SNAKE_CASE__ ) else: raise ValueError( F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:''' F''' {array.shape}''' ) logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' ) return model def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # Instantiate model logger.info(F'''Loading model based on config from {config_path}...''' ) snake_case_ = BertConfig.from_json_file(SCREAMING_SNAKE_CASE__ ) snake_case_ = BertModel(SCREAMING_SNAKE_CASE__ ) # Load weights from checkpoint logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' ) load_tfa_weights_in_bert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Save pytorch-model logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' ) torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( '''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', type=str, required=True, help='''The config json file corresponding to the BERT model. This specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', type=str, required=True, help='''Path to the output PyTorch model (must include filename).''', ) lowerCAmelCase_ = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
8
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
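A worked example: with L = 10 mH and C = 100 µF, f = 1/(2π√(LC)) = 1/(2π·10⁻³) ≈ 159.155 Hz:

label, freq = resonant_frequency(10e-3, 100e-6)
assert label == "Resonant frequency"
assert abs(freq - 159.1549) < 1e-3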
8
1
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
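A toy check of the kernel application: under the cross-shaped structuring element, a single set pixel grows to its 4-neighbourhood:

toy = np.zeros((5, 5))
toy[2, 2] = 1
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
dilated = dilation(toy, cross)
assert dilated[2, 2] == 1 and dilated[1, 2] == 1 and dilated[2, 1] == 1
assert dilated[0, 0] == 0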
8
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): return x + 2 class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''x = 3''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3} ) snake_case_ = '''x = y''' snake_case_ = {'''y''': 5} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 5, '''y''': 5} ) def snake_case__( self : Dict ) ->Optional[int]: snake_case_ = '''y = add_two(x)''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) # Won't work without the tool with CaptureStdout() as out: snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result is None assert "tried to execute add_two" in out.out def snake_case__( self : Union[str, Any] ) ->Dict: snake_case_ = '''x = 3''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3} ) def snake_case__( self : Optional[int] ) ->Optional[int]: snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def snake_case__( self : Dict ) ->str: snake_case_ = '''x = 3\ny = 5''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) def snake_case__( self : str ) ->Tuple: snake_case_ = '''text = f\'This is x: {x}.\'''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''text''': '''This is x: 3.'''} ) def snake_case__( self : Optional[Any] ) ->List[str]: snake_case_ = '''if x <= 3:\n y = 2\nelse:\n y = 5''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 2} ) snake_case_ = {'''x''': 8} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 8, '''y''': 5} ) def snake_case__( self : str ) ->str: snake_case_ = '''test_list = [x, add_two(x)]''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , [3, 5] ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} ) def snake_case__( self : Any ) ->List[Any]: snake_case_ = '''y = x''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 3} ) def snake_case__( self : Optional[int] ) ->Dict: snake_case_ = '''test_list = [x, add_two(x)]\ntest_list[1]''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} ) snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''x = 0\nfor i in range(3):\n x = i''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {'''range''': range} , state=_UpperCamelCase ) assert result == 2 self.assertDictEqual(_UpperCamelCase , {'''x''': 2, '''i''': 2} )
8
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: lowerCAmelCase_ = None lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase_ = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''', }, } lowerCAmelCase_ = { '''camembert-base''': 5_12, } lowerCAmelCase_ = '''▁''' class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : List[Any] = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE : Any = CamembertTokenizer def __init__( self : Optional[Any] , _UpperCamelCase : Tuple=None , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Optional[int]="<s>" , _UpperCamelCase : Any="</s>" , _UpperCamelCase : Tuple="</s>" , _UpperCamelCase : int="<s>" , _UpperCamelCase : Tuple="<unk>" , _UpperCamelCase : Any="<pad>" , _UpperCamelCase : Any="<mask>" , _UpperCamelCase : Optional[Any]=["<s>NOTUSED", "</s>NOTUSED"] , **_UpperCamelCase : Dict , ) ->List[Any]: # Mask token behave like a normal word, i.e. include the space before it snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , **_UpperCamelCase , ) snake_case_ = vocab_file snake_case_ = False if not self.vocab_file else True def snake_case__( self : Optional[int] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case__( self : Union[str, Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def snake_case__( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if 
os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ): copyfile(self.vocab_file , _UpperCamelCase ) return (out_vocab_file,)
8
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Tuple ) ->List[Any]: return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_UpperCamelCase ) for s in shape] )}.npy''' def snake_case__( self : Any ) ->List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() def snake_case__( self : int , _UpperCamelCase : Union[str, Any]=0 , _UpperCamelCase : int=(4, 4, 6_4, 6_4) , _UpperCamelCase : Optional[int]=False ) ->Tuple: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase , _UpperCamelCase ) ) , dtype=_UpperCamelCase ) return image def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Optional[int]="CompVis/stable-diffusion-v1-4" ) ->Optional[Any]: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = '''bf16''' if fpaa else None snake_case_, snake_case_ = FlaxUNetaDConditionModel.from_pretrained( _UpperCamelCase , subfolder='''unet''' , dtype=_UpperCamelCase , revision=_UpperCamelCase ) return model, params def snake_case__( self : Dict , _UpperCamelCase : List[Any]=0 , _UpperCamelCase : Tuple=(4, 7_7, 7_6_8) , _UpperCamelCase : List[Any]=False ) ->int: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase , _UpperCamelCase ) ) , dtype=_UpperCamelCase ) return hidden_states @parameterized.expand( [ # fmt: off [8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) ->Union[str, Any]: snake_case_, snake_case_ = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=_UpperCamelCase ) snake_case_ = self.get_latents(_UpperCamelCase , fpaa=_UpperCamelCase ) snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , fpaa=_UpperCamelCase ) snake_case_ = model.apply( {'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample assert sample.shape == latents.shape snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def 
snake_case__( self : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str ) ->Dict: snake_case_, snake_case_ = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=_UpperCamelCase ) snake_case_ = self.get_latents(_UpperCamelCase , shape=(4, 4, 9_6, 9_6) , fpaa=_UpperCamelCase ) snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , shape=(4, 7_7, 1_0_2_4) , fpaa=_UpperCamelCase ) snake_case_ = model.apply( {'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample assert sample.shape == latents.shape snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 )
8
1
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
8
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
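Typical usage of the decorator: the wrapped function takes batch_size as its first argument, the caller omits it, and the loop halves the value on every OOM-style failure. A sketch with a toy training body (training_loop and model_name are illustrative names):

@find_executable_batch_size(starting_batch_size=128)
def training_loop(batch_size, model_name="toy-model"):  # model_name is illustrative
    print(f"trying batch_size={batch_size}")
    # build dataloaders and train here; a raised "CUDA out of memory." error
    # would make the decorator retry with batch_size=64, then 32, ...


training_loop()  # called without batch_size; starts at 128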
8
1
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
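The closed form 2a⌊(a−1)/2⌋ being summed here matches the maximum of ((a−1)ⁿ + (a+1)ⁿ) mod a² over n; that reading is an inference from the formula, not stated in the snippet. A brute-force cross-check for small a:

# Assumed identity behind the closed form: for a >= 3,
# max over n of ((a-1)**n + (a+1)**n) % a**2 equals 2*a*((a-1)//2).
for a in range(3, 30):
    r_max = max(((a - 1) ** n + (a + 1) ** n) % a**2 for n in range(1, 2 * a + 1))
    assert r_max == 2 * a * ((a - 1) // 2)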
8
from __future__ import annotations


def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
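The cipher maps 'a'..'z' to 1..26 (ord('a') is 97, hence the -96 offset):

assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"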
8
1
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(check_str: str = "") -> None:
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
8
import math


def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    xa, ya = map(int, input(prompt).split(","))
    xb, yb = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print("Largest number is", xa, "^", ya)
    elif resb > resa:
        print("Largest number is", xb, "^", yb)
    else:
        print("Both are equal")
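A worked comparison: for 2^100 versus 10^30, res(2, 100) = 100·log10(2) ≈ 30.10 against res(10, 30) = 30, so 2^100 is the larger number without evaluating either power:

assert res(2, 100) > res(10, 30)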
8
1
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = "gpt_neo" SCREAMING_SNAKE_CASE : Any = ["past_key_values"] SCREAMING_SNAKE_CASE : Union[str, Any] = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : Optional[int] , _UpperCamelCase : Dict=5_0_2_5_7 , _UpperCamelCase : Any=2_0_4_8 , _UpperCamelCase : Optional[Any]=2_0_4_8 , _UpperCamelCase : Optional[Any]=2_4 , _UpperCamelCase : Optional[Any]=[[["global", "local"], 1_2]] , _UpperCamelCase : Optional[Any]=1_6 , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Union[str, Any]=2_5_6 , _UpperCamelCase : Optional[int]="gelu_new" , _UpperCamelCase : Optional[int]=0.0 , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : List[str]=0.0 , _UpperCamelCase : Any=0.1 , _UpperCamelCase : List[Any]=1e-5 , _UpperCamelCase : Dict=0.02 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Dict=5_0_2_5_6 , _UpperCamelCase : int=5_0_2_5_6 , **_UpperCamelCase : Optional[int] , ) ->Optional[int]: snake_case_ = vocab_size snake_case_ = max_position_embeddings snake_case_ = hidden_size snake_case_ = num_layers snake_case_ = num_heads snake_case_ = intermediate_size snake_case_ = window_size snake_case_ = activation_function snake_case_ = resid_dropout snake_case_ = embed_dropout snake_case_ = attention_dropout snake_case_ = classifier_dropout snake_case_ = layer_norm_epsilon snake_case_ = initializer_range snake_case_ = use_cache snake_case_ = bos_token_id snake_case_ = eos_token_id snake_case_ = attention_types snake_case_ = self.expand_attention_types_params(_UpperCamelCase ) if len(self.attention_layers ) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' f'''`config.num_layers = {self.num_layers}`. ''' '''`config.attention_layers` is prepared using `config.attention_types`. 
''' '''Please verify the value of `config.attention_types` argument.''' ) super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) @staticmethod def snake_case__( _UpperCamelCase : Optional[int] ) ->int: snake_case_ = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): import torch snake_case_ = input.size() snake_case_ = len(SCREAMING_SNAKE_CASE__ ) snake_case_ = shape[dimension] snake_case_ = torch.arange(0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ = torch.div(sizedim - size , SCREAMING_SNAKE_CASE__ , rounding_mode='''floor''' ) + 1 snake_case_ = torch.arange(SCREAMING_SNAKE_CASE__ ) + low_indices[:min_length][:, None] snake_case_ = [slice(SCREAMING_SNAKE_CASE__ )] * rank snake_case_ = indices snake_case_ = input[s] snake_case_ = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(SCREAMING_SNAKE_CASE__ ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): import torch snake_case_ = torch.arange(1 , SCREAMING_SNAKE_CASE__ ) snake_case_ = torch.remainder(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ = remainders == 0 snake_case_ = candidates[divisor_indices] snake_case_ = torch.max(SCREAMING_SNAKE_CASE__ ) return largest_divisor, torch.div(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rounding_mode='''floor''' ) class snake_case_ ( __A ): '''simple docstring''' @property def snake_case__( self : List[Any] ) ->Mapping[str, Mapping[int, str]]: snake_case_ = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(_UpperCamelCase , direction='''inputs''' ) snake_case_ = {0: '''batch''', 1: '''past_sequence + sequence'''} else: snake_case_ = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def snake_case__( self : str ) ->int: return self._config.num_heads def snake_case__( self : Optional[int] , _UpperCamelCase : PreTrainedTokenizer , _UpperCamelCase : int = -1 , _UpperCamelCase : int = -1 , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[TensorType] = None , ) ->Mapping[str, Any]: snake_case_ = super(_UpperCamelCase , self ).generate_dummy_inputs( _UpperCamelCase , batch_size=_UpperCamelCase , seq_length=_UpperCamelCase , is_pair=_UpperCamelCase , framework=_UpperCamelCase ) # We need to order the input in the way they appears in the forward() snake_case_ = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch snake_case_, snake_case_ = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values snake_case_ = seqlen + 2 snake_case_ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) snake_case_ = [ (torch.zeros(_UpperCamelCase ), torch.zeros(_UpperCamelCase )) for _ in range(self.num_layers ) ] snake_case_ = common_inputs['''attention_mask'''] if self.use_past: snake_case_ = ordered_inputs['''attention_mask'''].dtype snake_case_ = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(_UpperCamelCase , _UpperCamelCase , dtype=_UpperCamelCase )] , dim=1 ) return ordered_inputs @property def 
snake_case__( self : int ) ->int: return 1_3
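How the default attention_types above expands: [[["global", "local"], 12]] repeats the ["global", "local"] pair 12 times, yielding one attention kind per layer. A standalone rerun of the expansion logic from the staticmethod above:

pattern = [[["global", "local"], 12]]
layers = []
for item in pattern:
    for _ in range(item[1]):
        layers.extend(item[0])
assert len(layers) == 24
assert layers[:4] == ["global", "local", "global", "local"]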
8
import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase_ = { '''vocab_file''': { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''', '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model''' ), } } lowerCAmelCase_ = { '''google/bigbird-roberta-base''': 40_96, '''google/bigbird-roberta-large''': 40_96, '''google/bigbird-base-trivia-itc''': 40_96, } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : List[Any] = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE : List[int] = [] def __init__( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict="<unk>" , _UpperCamelCase : List[str]="<s>" , _UpperCamelCase : Tuple="</s>" , _UpperCamelCase : Any="<pad>" , _UpperCamelCase : Any="[SEP]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) ->None: snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else bos_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else eos_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else unk_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else pad_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else cls_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , sep_token=_UpperCamelCase , mask_token=_UpperCamelCase , cls_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) snake_case_ = vocab_file snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCamelCase ) @property def snake_case__( self : str ) ->List[Any]: return self.sp_model.get_piece_size() def snake_case__( self : int ) ->Union[str, Any]: snake_case_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) ->Any: snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : str , _UpperCamelCase : List[Any] ) ->List[str]: snake_case_ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__( self : Optional[int] , _UpperCamelCase : str ) ->List[str]: return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) def snake_case__( self : str , _UpperCamelCase : List[str] ) ->Tuple: return self.sp_model.piece_to_id(_UpperCamelCase ) def snake_case__( self : Union[str, Any] , _UpperCamelCase : str ) ->List[Any]: snake_case_ = self.sp_model.IdToPiece(_UpperCamelCase ) return token def snake_case__( self : Dict , _UpperCamelCase : Optional[int] ) ->List[str]: snake_case_ = [] snake_case_ = '''''' snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCamelCase ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(_UpperCamelCase ) snake_case_ = False out_string += self.sp_model.decode(_UpperCamelCase ) return out_string.strip() def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : bool = False , _UpperCamelCase : bool = None , _UpperCamelCase : bool = True , **_UpperCamelCase : List[str] , ) ->str: snake_case_ = kwargs.pop('''use_source_tokenizer''' , _UpperCamelCase ) snake_case_ = self.convert_ids_to_tokens(_UpperCamelCase , skip_special_tokens=_UpperCamelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 snake_case_ = [] snake_case_ = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) snake_case_ = [] sub_texts.append(_UpperCamelCase ) else: current_sub_text.append(_UpperCamelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: snake_case_ = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(_UpperCamelCase ) ) else: snake_case_ = ''''''.join(_UpperCamelCase ) snake_case_ = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: snake_case_ = self.clean_up_tokenization(_UpperCamelCase ) return clean_text else: return text def snake_case__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , '''wb''' ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,) def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def snake_case__( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase )) + [1] return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1] def snake_case__( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
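For reference, the single- and pair-sequence layouts built by the methods above are [CLS] A [SEP] and [CLS] A [SEP] B [SEP]. A tiny illustration (the token ids here are made up, not BigBird's real ids):

def build_inputs(ids_a, ids_b=None, cls_id=65, sep_id=66):
    # [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]

print(build_inputs([7, 8]))        # [65, 7, 8, 66]
print(build_inputs([7, 8], [9]))   # [65, 7, 8, 66, 9, 66]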
8
1
from __future__ import annotations lowerCAmelCase_ = list[list[int]] # assigning initial values to the grid lowerCAmelCase_ = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution lowerCAmelCase_ = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): if location := find_empty_location(SCREAMING_SNAKE_CASE__ ): snake_case_, snake_case_ = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = digit if sudoku(SCREAMING_SNAKE_CASE__ ) is not None: return grid snake_case_ = 0 return None def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): for row in grid: for cell in row: print(SCREAMING_SNAKE_CASE__ , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print('''\nExample grid:\n''' + '''=''' * 20) print_solution(example_grid) print('''\nExample grid solution:''') lowerCAmelCase_ = sudoku(example_grid) if solution is not None: print_solution(solution) else: print('''Cannot find a solution.''')
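As a compact reference for the backtracking scheme above, a minimal runnable solver with descriptive names in place of the placeholders (a sketch of the same algorithm, not the original source):

def is_safe(grid, row, col, n):
    # digit n must not repeat in its row, column, or 3x3 box
    if any(grid[row][i] == n or grid[i][col] == n for i in range(9)):
        return False
    r, c = row - row % 3, col - col % 3          # top-left corner of the 3x3 box
    return all(grid[r + i][c + j] != n for i in range(3) for j in range(3))

def solve(grid):
    # find an empty cell, try digits 1-9, recurse, undo on failure
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                for digit in range(1, 10):
                    if is_safe(grid, i, j, digit):
                        grid[i][j] = digit
                        if solve(grid):
                            return True
                        grid[i][j] = 0           # backtrack
                return False                     # no digit fits: dead end
    return True                                  # no empty cell left: solved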
8
from __future__ import annotations from collections.abc import Generator def __SCREAMING_SNAKE_CASE (): snake_case_ = {} snake_case_ = 2 while True: snake_case_ = factor_map.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if factor: snake_case_ = factor + prime while x in factor_map: x += factor snake_case_ = factor else: snake_case_ = prime yield prime prime += 1 def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 1E10 ): snake_case_ = sieve() snake_case_ = 1 while True: snake_case_ = next(SCREAMING_SNAKE_CASE__ ) if (2 * prime * n) > limit: return n # Ignore the next prime as the remainder will be 2. next(SCREAMING_SNAKE_CASE__ ) n += 2 if __name__ == "__main__": print(solution())
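A readable sketch of the incremental (unbounded) Sieve of Eratosthenes the generator above implements, with descriptive names in place of the placeholders:

def sieve():
    # factor_map[x] stores a prime factor of the next composite x it will hit
    factor_map = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:                     # prime is composite: slide its factor forward
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:                          # no stored factor: prime really is prime
            factor_map[prime * prime] = prime
            yield prime
        prime += 1

gen = sieve()
print([next(gen) for _ in range(10)])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]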
8
1
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = round(n ** (1 / 3) ) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
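Rounding the floating-point cube root (as above, needed because 27 ** (1/3) evaluates to 3.0000000000000004) is fine for small inputs, but for arbitrarily large integers an exact integer search sidesteps float precision entirely; one possible approach:

def is_perfect_cube(n: int) -> bool:
    if n < 0:
        return is_perfect_cube(-n)
    lo, hi = 0, 1 << ((n.bit_length() + 2) // 3 + 1)   # guarantees hi**3 > n
    while lo < hi:                                     # binary search for the smallest lo with lo**3 >= n
        mid = (lo + hi) // 2
        if mid * mid * mid < n:
            lo = mid + 1
        else:
            hi = mid
    return lo * lo * lo == n

assert is_perfect_cube(27) and not is_perfect_cube(4)
assert is_perfect_cube(10**60) and not is_perfect_cube(10**60 + 1)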
8
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OPTForCausalLM''', '''OPTModel''', '''OPTPreTrainedModel''', '''OPTForSequenceClassification''', '''OPTForQuestionAnswering''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''FlaxOPTForCausalLM''', '''FlaxOPTModel''', '''FlaxOPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
8
1
import random class snake_case_ : '''simple docstring''' @staticmethod def snake_case__( _UpperCamelCase : str ) ->tuple[list[int], list[int]]: snake_case_ = [ord(_UpperCamelCase ) for i in text] snake_case_ = [] snake_case_ = [] for i in plain: snake_case_ = random.randint(1 , 3_0_0 ) snake_case_ = (i + k) * k cipher.append(_UpperCamelCase ) key.append(_UpperCamelCase ) return cipher, key @staticmethod def snake_case__( _UpperCamelCase : list[int] , _UpperCamelCase : list[int] ) ->str: snake_case_ = [] for i in range(len(_UpperCamelCase ) ): snake_case_ = int((cipher[i] - (key[i]) ** 2) / key[i] ) plain.append(chr(_UpperCamelCase ) ) return "".join(_UpperCamelCase ) if __name__ == "__main__": lowerCAmelCase_ , lowerCAmelCase_ = Onepad().encrypt('''Hello''') print(c, k) print(Onepad().decrypt(c, k))
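The cipher above encodes each character c with a fresh random key k as (ord(c) + k) * k, so decryption solves p = (cipher - k^2) / k. A runnable sketch with descriptive names:

import random

def encrypt(text):
    cipher, key = [], []
    for code in (ord(ch) for ch in text):
        k = random.randint(1, 300)
        cipher.append((code + k) * k)   # (plain + k) * k
        key.append(k)
    return cipher, key

def decrypt(cipher, key):
    # invert (p + k) * k  =>  p = (c - k*k) // k
    return "".join(chr((c - k * k) // k) for c, k in zip(cipher, key))

c, k = encrypt("Hello")
assert decrypt(c, k) == "Hello"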
8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = "philschmid/bart-large-cnn-samsum" SCREAMING_SNAKE_CASE : Tuple = ( "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, " "and returns a summary of the text." ) SCREAMING_SNAKE_CASE : str = "summarizer" SCREAMING_SNAKE_CASE : str = AutoTokenizer SCREAMING_SNAKE_CASE : str = AutoModelForSeqaSeqLM SCREAMING_SNAKE_CASE : Optional[int] = ["text"] SCREAMING_SNAKE_CASE : Optional[int] = ["text"] def snake_case__( self : str , _UpperCamelCase : int ) ->Optional[int]: return self.pre_processor(_UpperCamelCase , return_tensors='''pt''' , truncation=_UpperCamelCase ) def snake_case__( self : Tuple , _UpperCamelCase : Optional[int] ) ->Tuple: return self.model.generate(**_UpperCamelCase )[0] def snake_case__( self : Optional[Any] , _UpperCamelCase : Optional[int] ) ->Any: return self.pre_processor.decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
8
1
from __future__ import annotations from collections.abc import Generator def __SCREAMING_SNAKE_CASE (): snake_case_ = {} snake_case_ = 2 while True: snake_case_ = factor_map.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if factor: snake_case_ = factor + prime while x in factor_map: x += factor snake_case_ = factor else: snake_case_ = prime yield prime prime += 1 def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 1E10 ): snake_case_ = sieve() snake_case_ = 1 while True: snake_case_ = next(SCREAMING_SNAKE_CASE__ ) if (2 * prime * n) > limit: return n # Ignore the next prime as the remainder will be 2. next(SCREAMING_SNAKE_CASE__ ) n += 2 if __name__ == "__main__": print(solution())
8
from collections import deque from .hash_table import HashTable class snake_case_ ( __A ): '''simple docstring''' def __init__( self : int , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) ->Tuple: super().__init__(*_UpperCamelCase , **_UpperCamelCase ) def snake_case__( self : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Dict ) ->Tuple: snake_case_ = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(_UpperCamelCase ) snake_case_ = self.values[key] def snake_case__( self : List[Any] ) ->str: return ( sum(self.charge_factor - len(_UpperCamelCase ) for slot in self.values ) / self.size_table * self.charge_factor ) def snake_case__( self : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int]=None ) ->str: if not ( len(self.values[key] ) == self.charge_factor and self.values.count(_UpperCamelCase ) == 0 ): return key return super()._collision_resolution(_UpperCamelCase , _UpperCamelCase )
8
1
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging lowerCAmelCase_ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt'''] lowerCAmelCase_ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('''0.9.0'''): raise Exception('''requires fairseq >= 0.9.0''') logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = ''' Hello world! cécé herlolip''' lowerCAmelCase_ = [ ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''), ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''), ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''), ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''), ] def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', ] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = dct.pop(SCREAMING_SNAKE_CASE__ ) snake_case_ = val def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = torch.load(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' ) snake_case_ = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval() hub_interface.model.load_state_dict(sd['''model'''] ) return hub_interface def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_, snake_case_ = emb.weight.shape snake_case_ = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ ) snake_case_ = emb.weight.data return lin_layer @torch.no_grad() def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ): if not os.path.exists(SCREAMING_SNAKE_CASE__ ): snake_case_ = torch.hub.load('''pytorch/fairseq''' , SCREAMING_SNAKE_CASE__ ).eval() else: snake_case_ = load_xsum_checkpoint(SCREAMING_SNAKE_CASE__ ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: snake_case_ = checkpoint_path.replace('''.''' , '''-''' ) snake_case_ = BartConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) snake_case_ = bart.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ) snake_case_ = BartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ).encode(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).unsqueeze(0 ) if not torch.eq(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).all(): raise ValueError( F'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' ) if checkpoint_path == "bart.large.mnli": snake_case_ = bart.state_dict() remove_ignore_keys_(SCREAMING_SNAKE_CASE__ ) snake_case_ = state_dict['''model.decoder.embed_tokens.weight'''] for src, dest in mnli_rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ = BartForSequenceClassification(SCREAMING_SNAKE_CASE__ ).eval() model.load_state_dict(SCREAMING_SNAKE_CASE__ ) snake_case_ = bart.predict('''mnli''' , 
SCREAMING_SNAKE_CASE__ , return_logits=SCREAMING_SNAKE_CASE__ ) snake_case_ = model(SCREAMING_SNAKE_CASE__ )[0] # logits else: # no classification heads to worry about snake_case_ = bart.model.state_dict() remove_ignore_keys_(SCREAMING_SNAKE_CASE__ ) snake_case_ = state_dict['''decoder.embed_tokens.weight'''] snake_case_ = bart.extract_features(SCREAMING_SNAKE_CASE__ ) if hf_checkpoint_name == "facebook/bart-large": snake_case_ = BartModel(SCREAMING_SNAKE_CASE__ ).eval() model.load_state_dict(SCREAMING_SNAKE_CASE__ ) snake_case_ = model(SCREAMING_SNAKE_CASE__ ).model[0] else: snake_case_ = BartForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() # an existing summarization ckpt model.model.load_state_dict(SCREAMING_SNAKE_CASE__ ) if hasattr(SCREAMING_SNAKE_CASE__ , '''lm_head''' ): snake_case_ = make_linear_from_emb(model.model.shared ) snake_case_ = model.model(SCREAMING_SNAKE_CASE__ )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' ) Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum''' ) lowerCAmelCase_ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
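The make_linear_from_emb helper in the script ties the output projection to the embedding matrix (weight tying); a minimal self-contained sketch:

import torch
from torch import nn

def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    # bias-free Linear whose weight is the embedding matrix
    vocab_size, emb_size = emb.weight.shape
    lin = nn.Linear(emb_size, vocab_size, bias=False)
    lin.weight.data = emb.weight.data
    return lin

emb = nn.Embedding(10, 4)
logits = make_linear_from_emb(emb)(torch.randn(2, 4))
print(logits.shape)  # torch.Size([2, 10])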
8
from __future__ import annotations def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = len(SCREAMING_SNAKE_CASE__ ) # We need to create solution object to save path. snake_case_ = [[0 for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )] snake_case_ = run_maze(SCREAMING_SNAKE_CASE__ , 0 , 0 , SCREAMING_SNAKE_CASE__ ) if solved: print('''\n'''.join(str(SCREAMING_SNAKE_CASE__ ) for row in solutions ) ) else: print('''No solution exists!''' ) return solved def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = len(SCREAMING_SNAKE_CASE__ ) # Final check point. if i == j == (size - 1): snake_case_ = 1 return True snake_case_ = (not i < 0) and (not j < 0) # Check lower bounds snake_case_ = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. snake_case_ = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited snake_case_ = 1 # check for directions if ( run_maze(SCREAMING_SNAKE_CASE__ , i + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or run_maze(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j + 1 , SCREAMING_SNAKE_CASE__ ) or run_maze(SCREAMING_SNAKE_CASE__ , i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or run_maze(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j - 1 , SCREAMING_SNAKE_CASE__ ) ): return True snake_case_ = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
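A compact runnable sketch of the depth-first search with backtracking used above (descriptive names; 0 = open cell, 1 = wall):

def run_maze(maze, i, j, path):
    size = len(maze)
    if i == j == size - 1:                   # reached the bottom-right exit
        path[i][j] = 1
        return True
    if 0 <= i < size and 0 <= j < size and not path[i][j] and not maze[i][j]:
        path[i][j] = 1                       # tentatively take this cell
        if any(run_maze(maze, i + di, j + dj, path)
               for di, dj in ((1, 0), (0, 1), (-1, 0), (0, -1))):
            return True
        path[i][j] = 0                       # dead end: backtrack
    return False

maze = [[0, 1], [0, 0]]
path = [[0] * 2 for _ in range(2)]
assert run_maze(maze, 0, 0, path) and path == [[1, 0], [1, 1]]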
8
1
from typing import TYPE_CHECKING from ..utils import _LazyModule lowerCAmelCase_ = { '''config''': [ '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''', '''OnnxConfig''', '''OnnxConfigWithPast''', '''OnnxSeq2SeqConfigWithPast''', '''PatchingSpec''', ], '''convert''': ['''export''', '''validate_model_outputs'''], '''features''': ['''FeaturesManager'''], '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
8
from decimal import Decimal, getcontext from math import ceil, factorial def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): raise TypeError('''Undefined for non-integers''' ) elif precision < 1: raise ValueError('''Undefined for non-natural numbers''' ) snake_case_ = precision snake_case_ = ceil(precision / 14 ) snake_case_ = 426880 * Decimal(10005 ).sqrt() snake_case_ = 1 snake_case_ = 13591409 snake_case_ = Decimal(SCREAMING_SNAKE_CASE__ ) for k in range(1 , SCREAMING_SNAKE_CASE__ ): snake_case_ = factorial(6 * k ) // (factorial(3 * k ) * factorial(SCREAMING_SNAKE_CASE__ ) ** 3) linear_term += 545140134 exponential_term *= -262537412640768000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": lowerCAmelCase_ = 50 print(f"""The first {n} digits of pi is: {pi(n)}""")
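For reference, the series accumulated above is the Chudnovsky formula; in the code's terms it returns constant_term / partial_sum over ceil(precision / 14) terms, each term contributing roughly 14 correct digits:

\[
\pi \approx \frac{426880\,\sqrt{10005}}{\displaystyle\sum_{k=0}^{K-1}\frac{(6k)!\,\left(13591409 + 545140134\,k\right)}{(3k)!\,(k!)^{3}\,(-262537412640768000)^{k}}},
\qquad K = \left\lceil \frac{\text{precision}}{14} \right\rceil .
\]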
8
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = "philschmid/bart-large-cnn-samsum" SCREAMING_SNAKE_CASE : Tuple = ( "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, " "and returns a summary of the text." ) SCREAMING_SNAKE_CASE : str = "summarizer" SCREAMING_SNAKE_CASE : str = AutoTokenizer SCREAMING_SNAKE_CASE : str = AutoModelForSeqaSeqLM SCREAMING_SNAKE_CASE : Optional[int] = ["text"] SCREAMING_SNAKE_CASE : Optional[int] = ["text"] def snake_case__( self : str , _UpperCamelCase : int ) ->Optional[int]: return self.pre_processor(_UpperCamelCase , return_tensors='''pt''' , truncation=_UpperCamelCase ) def snake_case__( self : Tuple , _UpperCamelCase : Optional[int] ) ->Tuple: return self.model.generate(**_UpperCamelCase )[0] def snake_case__( self : Optional[Any] , _UpperCamelCase : Optional[int] ) ->Any: return self.pre_processor.decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
8
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class snake_case_ ( __A ): '''simple docstring''' def __init__( self : int , _UpperCamelCase : pyspark.sql.DataFrame , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : bool = True , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : str = None , _UpperCamelCase : bool = True , _UpperCamelCase : str = "arrow" , **_UpperCamelCase : Tuple , ) ->str: super().__init__( split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , **_UpperCamelCase , ) snake_case_ = load_from_cache_file snake_case_ = file_format snake_case_ = Spark( df=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , working_dir=_UpperCamelCase , **_UpperCamelCase , ) def snake_case__( self : int ) ->Tuple: if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) snake_case_ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=_UpperCamelCase , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
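A typical entry point for the reader above (assuming a recent datasets release that exposes Dataset.from_spark; names are illustrative):

from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
ds = Dataset.from_spark(df)   # routed through a Spark dataset reader like the class above
print(ds[0])                  # {'text': 'a', 'label': 0}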
8
1
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 200 ): snake_case_ = [1, 2, 5, 10, 20, 50, 100, 200] snake_case_ = [0] * (pence + 1) snake_case_ = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(coin , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(2_00) == 7_36_82
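The loop above is the classic unbounded coin-change dynamic program; a runnable version with descriptive names:

def ways_to_make(pence=200):
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [0] * (pence + 1)
    ways[0] = 1                          # one way to make 0: use no coins
    for coin in coins:                   # after each coin, ways[i] counts combinations using coins so far
        for i in range(coin, pence + 1):
            ways[i] += ways[i - coin]
    return ways[pence]

assert ways_to_make(200) == 73682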
8
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable lowerCAmelCase_ = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''DPTFeatureExtractor'''] lowerCAmelCase_ = ['''DPTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DPTForDepthEstimation''', '''DPTForSemanticSegmentation''', '''DPTModel''', '''DPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
8
1
import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class snake_case_ ( __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = FlaxAutoencoderKL @property def snake_case__( self : List[str] ) ->List[Any]: snake_case_ = 4 snake_case_ = 3 snake_case_ = (3_2, 3_2) snake_case_ = jax.random.PRNGKey(0 ) snake_case_ = jax.random.uniform(_UpperCamelCase , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def snake_case__( self : Dict ) ->Union[str, Any]: snake_case_ = { '''block_out_channels''': [3_2, 6_4], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } snake_case_ = self.dummy_input return init_dict, inputs_dict
8
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase_ = { '''vocab_file''': { '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''', }, '''tokenizer_file''': { '''unc-nlp/lxmert-base-uncased''': ( '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase_ = { '''unc-nlp/lxmert-base-uncased''': 5_12, } lowerCAmelCase_ = { '''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True}, } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : Any = LxmertTokenizer def __init__( self : Union[str, Any] , _UpperCamelCase : int=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Dict=True , _UpperCamelCase : Any="[UNK]" , _UpperCamelCase : Tuple="[SEP]" , _UpperCamelCase : List[Any]="[PAD]" , _UpperCamelCase : Union[str, Any]="[CLS]" , _UpperCamelCase : str="[MASK]" , _UpperCamelCase : List[str]=True , _UpperCamelCase : List[str]=None , **_UpperCamelCase : List[str] , ) ->Any: super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , ) snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _UpperCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _UpperCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _UpperCamelCase ) != tokenize_chinese_chars ): snake_case_ = getattr(_UpperCamelCase , normalizer_state.pop('''type''' ) ) snake_case_ = do_lower_case snake_case_ = strip_accents snake_case_ = tokenize_chinese_chars snake_case_ = normalizer_class(**_UpperCamelCase ) snake_case_ = do_lower_case def snake_case__( self : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=None ) ->List[Any]: snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case__( self : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: snake_case_ = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase ) return tuple(_UpperCamelCase )
8
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Swinv2ForImageClassification''', '''Swinv2ForMaskedImageModeling''', '''Swinv2Model''', '''Swinv2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
8
import math def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 10001 ): try: snake_case_ = int(SCREAMING_SNAKE_CASE__ ) except (TypeError, ValueError): raise TypeError('''Parameter nth must be int or castable to int.''' ) from None if nth <= 0: raise ValueError('''Parameter nth must be greater than or equal to one.''' ) snake_case_ = [] snake_case_ = 2 while len(SCREAMING_SNAKE_CASE__ ) < nth: if is_prime(SCREAMING_SNAKE_CASE__ ): primes.append(SCREAMING_SNAKE_CASE__ ) num += 1 else: num += 1 return primes[len(SCREAMING_SNAKE_CASE__ ) - 1] if __name__ == "__main__": print(f"""{solution() = }""")
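A runnable sketch of the 6k +/- 1 trial-division primality test and the nth-prime search above, with descriptive names:

import math

def is_prime(number):
    # every prime > 3 has the form 6k +/- 1, so only those candidates are tested
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

def nth_prime(nth):
    count, num = 0, 1
    while count < nth:
        num += 1
        if is_prime(num):
            count += 1
    return num

assert nth_prime(6) == 13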
8
1
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class snake_case_ ( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def snake_case__( self : Any , _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : List[str] ) ->str: snake_case_ = hf_hub_download( repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) snake_case_ = VideoClassificationPipeline(model=_UpperCamelCase , image_processor=_UpperCamelCase , top_k=2 ) snake_case_ = [ example_video_filepath, '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''', ] return video_classifier, examples def snake_case__( self : int , _UpperCamelCase : int , _UpperCamelCase : Dict ) ->Optional[int]: for example in examples: snake_case_ = video_classifier(_UpperCamelCase ) self.assertEqual( _UpperCamelCase , [ {'''score''': ANY(_UpperCamelCase ), '''label''': ANY(_UpperCamelCase )}, {'''score''': ANY(_UpperCamelCase ), '''label''': ANY(_UpperCamelCase )}, ] , ) @require_torch def snake_case__( self : Dict ) ->Any: snake_case_ = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification''' snake_case_ = VideoMAEFeatureExtractor( size={'''shortest_edge''': 1_0} , crop_size={'''height''': 1_0, '''width''': 1_0} ) snake_case_ = pipeline( '''video-classification''' , model=_UpperCamelCase , feature_extractor=_UpperCamelCase , frame_sampling_rate=4 ) snake_case_ = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) snake_case_ = video_classifier(_UpperCamelCase , top_k=2 ) self.assertEqual( nested_simplify(_UpperCamelCase , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , ) snake_case_ = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(_UpperCamelCase , decimals=4 ) , [ [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], ] , ) @require_tf def snake_case__( self : Optional[int] ) ->Any: pass
8
from sklearn.metrics import mean_squared_error import datasets lowerCAmelCase_ = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' lowerCAmelCase_ = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' lowerCAmelCase_ = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. 
])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case_ ( datasets.Metric ): '''simple docstring''' def snake_case__( self : Optional[int] ) ->List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def snake_case__( self : List[Any] ) ->Optional[int]: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def snake_case__( self : int , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any=None , _UpperCamelCase : Optional[int]="uniform_average" , _UpperCamelCase : Tuple=True ) ->Tuple: snake_case_ = mean_squared_error( _UpperCamelCase , _UpperCamelCase , sample_weight=_UpperCamelCase , multioutput=_UpperCamelCase , squared=_UpperCamelCase ) return {"mse": mse}
8
1
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class snake_case_ ( unittest.TestCase ): '''simple docstring''' @slow def snake_case__( self : int ) ->str: snake_case_ = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' ) snake_case_ = tf.convert_to_tensor( [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" snake_case_ = model(_UpperCamelCase )['''last_hidden_state'''] snake_case_ = tf.TensorShape((1, 1_0, 7_6_8) ) self.assertEqual(output.shape , _UpperCamelCase ) # compare the actual values for a slice. snake_case_ = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
8
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = [] if len(SCREAMING_SNAKE_CASE__ ) == 1: return [nums.copy()] for _ in range(len(SCREAMING_SNAKE_CASE__ ) ): snake_case_ = nums.pop(0 ) snake_case_ = permute(SCREAMING_SNAKE_CASE__ ) for perm in permutations: perm.append(SCREAMING_SNAKE_CASE__ ) result.extend(SCREAMING_SNAKE_CASE__ ) nums.append(SCREAMING_SNAKE_CASE__ ) return result def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): def backtrack(SCREAMING_SNAKE_CASE__ ): if start == len(SCREAMING_SNAKE_CASE__ ) - 1: output.append(nums[:] ) else: for i in range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ): snake_case_, snake_case_ = nums[i], nums[start] backtrack(start + 1 ) snake_case_, snake_case_ = nums[i], nums[start] # backtrack snake_case_ = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function lowerCAmelCase_ = permutea([1, 2, 3]) print(res) doctest.testmod()
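A runnable sketch of the first (rotation-based) permutation routine above, with descriptive names:

def permute(nums):
    # perms of the remaining elements, then append the removed head; rotate and repeat
    if len(nums) == 1:
        return [nums.copy()]
    result = []
    for _ in range(len(nums)):
        m = nums.pop(0)
        for perm in permute(nums):
            perm.append(m)
            result.append(perm)
        nums.append(m)        # restore the list for the next rotation
    return result

assert len(permute([1, 2, 3])) == 6   # e.g. the first permutation produced is [3, 2, 1]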
8
1
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): if len(SCREAMING_SNAKE_CASE__ ) <= 1: return lst snake_case_ = 1 while i < len(SCREAMING_SNAKE_CASE__ ): if lst[i - 1] <= lst[i]: i += 1 else: snake_case_, snake_case_ = lst[i], lst[i - 1] i -= 1 if i == 0: snake_case_ = 1 return lst if __name__ == "__main__": lowerCAmelCase_ = input('''Enter numbers separated by a comma:\n''').strip() lowerCAmelCase_ = [int(item) for item in user_input.split(''',''')] print(gnome_sort(unsorted))
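A runnable sketch of gnome sort as implemented above (walk forward while in order; on an inversion, swap and step back):

def gnome_sort(lst):
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i = max(i - 1, 1)   # step back, but never before index 1
    return lst

assert gnome_sort([5, 3, 1, 4]) == [1, 3, 4, 5]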
8
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
8
1
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 16 ): snake_case_ = AutoTokenizer.from_pretrained('''bert-base-cased''' ) snake_case_ = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(SCREAMING_SNAKE_CASE__ ): # max_length=None => use the model max length (it's actually the default) snake_case_ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): snake_case_ = datasets.map( SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case_ = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(SCREAMING_SNAKE_CASE__ ): # On TPU it's best to pad everything to the same length or training will be very slow. snake_case_ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": snake_case_ = 16 elif accelerator.mixed_precision != "no": snake_case_ = 8 else: snake_case_ = None return tokenizer.pad( SCREAMING_SNAKE_CASE__ , padding='''longest''' , max_length=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
snake_case_ = DataLoader( tokenized_datasets['''train'''] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) snake_case_ = DataLoader( tokenized_datasets['''validation'''] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCAmelCase_ = mocked_dataloaders # noqa: F811 def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , SCREAMING_SNAKE_CASE__ ) == "1": snake_case_ = 2 # Initialize accelerator snake_case_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case_ = config['''lr'''] snake_case_ = int(config['''num_epochs'''] ) snake_case_ = int(config['''seed'''] ) snake_case_ = int(config['''batch_size'''] ) snake_case_ = evaluate.load('''glue''' , '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=SCREAMING_SNAKE_CASE__ ) def inner_training_loop(SCREAMING_SNAKE_CASE__ ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(SCREAMING_SNAKE_CASE__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case_ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=SCREAMING_SNAKE_CASE__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). snake_case_ = model.to(accelerator.device ) # Instantiate optimizer snake_case_ = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE__ ) snake_case_, snake_case_ = get_dataloaders(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Instantiate scheduler snake_case_ = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE__ , num_warmup_steps=100 , num_training_steps=(len(SCREAMING_SNAKE_CASE__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ = accelerator.prepare( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Now we train the model for epoch in range(SCREAMING_SNAKE_CASE__ ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) snake_case_ = model(**SCREAMING_SNAKE_CASE__ ) snake_case_ = outputs.loss accelerator.backward(SCREAMING_SNAKE_CASE__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case_ = model(**SCREAMING_SNAKE_CASE__ ) snake_case_ = outputs.logits.argmax(dim=-1 ) snake_case_, snake_case_ = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ , ) snake_case_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE__ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __SCREAMING_SNAKE_CASE (): snake_case_ = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) snake_case_ = parser.parse_args() snake_case_ = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
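The key pattern in the script above is find_executable_batch_size, which re-runs the wrapped training function with a smaller batch size after an out-of-memory failure. A toy illustration of the retry loop (a simplification, not Accelerate's actual implementation):

def find_executable_batch_size(starting_batch_size=128):
    def decorator(fn):
        def wrapper():
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size)
                except RuntimeError as e:          # the real code checks for CUDA OOM specifically
                    if "out of memory" not in str(e):
                        raise
                    batch_size //= 2               # halve and retry
            raise RuntimeError("no executable batch size found")
        return wrapper
    return decorator

@find_executable_batch_size(starting_batch_size=64)
def train(batch_size):
    if batch_size > 16:                            # pretend anything above 16 runs out of memory
        raise RuntimeError("CUDA out of memory")
    return batch_size

assert train() == 16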
8
from ..utils import DummyObject, requires_backends class snake_case_ ( metaclass=__A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = ["note_seq"] def __init__( self : Optional[int] , *_UpperCamelCase : str , **_UpperCamelCase : Optional[int] ) ->Any: requires_backends(self , ['''note_seq'''] ) @classmethod def snake_case__( cls : int , *_UpperCamelCase : Any , **_UpperCamelCase : List[Any] ) ->int: requires_backends(cls , ['''note_seq'''] ) @classmethod def snake_case__( cls : Dict , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Union[str, Any] ) ->List[str]: requires_backends(cls , ['''note_seq'''] )
8
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCAmelCase_ = logging.get_logger(__name__) if is_vision_available(): import PIL class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = ["pixel_values"] def __init__( self : List[str] , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = True , _UpperCamelCase : Union[int, float] = 1 / 2_5_5 , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : bool = True , **_UpperCamelCase : Union[str, Any] , ) ->None: super().__init__(**_UpperCamelCase ) snake_case_ = size if size is not None else {'''shortest_edge''': 2_2_4} snake_case_ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase ) snake_case_ = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} snake_case_ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='''crop_size''' ) snake_case_ = do_resize snake_case_ = size snake_case_ = resample snake_case_ = do_center_crop snake_case_ = crop_size snake_case_ = do_rescale snake_case_ = rescale_factor snake_case_ = do_normalize snake_case_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN snake_case_ = image_std if image_std is not None else OPENAI_CLIP_STD snake_case_ = do_convert_rgb def snake_case__( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Optional[Any] , ) ->np.ndarray: snake_case_ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) snake_case_ = get_resize_output_image_size(_UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCamelCase ) return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def snake_case__( self : int , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : int , ) ->np.ndarray: snake_case_ = get_size_dict(_UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(_UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCamelCase , **_UpperCamelCase ) def snake_case__( self : Tuple , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : str , ) ->Optional[Any]: return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def snake_case__( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Optional[Any] , ) ->np.ndarray: return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def snake_case__( self : Union[str, Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = None , _UpperCamelCase : bool = None , _UpperCamelCase : int = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **_UpperCamelCase : Tuple , ) ->PIL.Image.Image: snake_case_ = do_resize if do_resize is not None else self.do_resize snake_case_ = size if size is not None else self.size snake_case_ = get_size_dict(_UpperCamelCase , param_name='''size''' , default_to_square=_UpperCamelCase ) snake_case_ = resample if resample is not None else self.resample snake_case_ = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case_ = crop_size if crop_size is not None else self.crop_size snake_case_ = get_size_dict(_UpperCamelCase , param_name='''crop_size''' , default_to_square=_UpperCamelCase ) snake_case_ = do_rescale if do_rescale is not None else self.do_rescale snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ = do_normalize if do_normalize is not None else self.do_normalize snake_case_ = image_mean if image_mean is not None else self.image_mean snake_case_ = image_std if image_std is not None else self.image_std snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case_ = make_list_of_images(_UpperCamelCase ) if not valid_images(_UpperCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case_ = [convert_to_rgb(_UpperCamelCase ) for image in images] # All transformations expect numpy arrays. 
snake_case_ = [to_numpy_array(_UpperCamelCase ) for image in images] if do_resize: snake_case_ = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images] if do_center_crop: snake_case_ = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images] if do_rescale: snake_case_ = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images] if do_normalize: snake_case_ = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images] snake_case_ = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images] snake_case_ = {'''pixel_values''': images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
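At its core the preprocessing above is a fixed chain of array operations; a minimal numpy sketch of the rescale and normalize steps (the mean/std values are the OPENAI_CLIP_MEAN/OPENAI_CLIP_STD constants imported above, reproduced here so the snippet stands alone):

import numpy as np

OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]

image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)
image = image * (1 / 255)                              # the do_rescale step
image = (image - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD   # the do_normalize step
pixel_values = image.transpose(2, 0, 1)                # ChannelDimension.FIRST layout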
8
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class snake_case_(PretrainedConfig):
    '''Configuration for a ViT-MSN model.'''

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
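A quick sketch (within this module) of how the config's fields determine the model geometry, using the defaults above:

config = snake_case_()
head_dim = config.hidden_size // config.num_attention_heads  # 768 // 12 == 64
num_patches = (config.image_size // config.patch_size) ** 2  # (224 // 16) ** 2 == 196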
8
1
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base,
        # so the two powers can be compared without ever computing them.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
        raise AssertionError('''This should never happen''')


if __name__ == "__main__":
    # Main function
    # Read two base/power pairs from input and typecast them to int using map.
    # Here x is the base and y is the power.
    prompt = '''Enter the base and the power separated by a comma: '''
    xa, ya = map(int, input(prompt).split(''','''))
    xb, yb = map(int, input(prompt).split(''','''))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)

    # We check for the largest number
    if resa > resb:
        print('''Largest number is''', xa, '''^''', ya)
    elif resb > resa:
        print('''Largest number is''', xb, '''^''', yb)
    else:
        print('''Both are equal''')
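As a worked check of the log trick, 2^100 and 10^30 can be compared without materializing either power:

import math

# log10(2**100) = 100 * log10(2) ≈ 30.10 while log10(10**30) = 30.0,
# so 2**100 is the larger number even though neither power is computed.
assert 100 * math.log10(2) > 30 * math.log10(10)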
8
from __future__ import annotations

from math import pi, sqrt


def __SCREAMING_SNAKE_CASE(inductance: float, capacitance: float) -> tuple[str, float]:
    if inductance <= 0:
        raise ValueError('''Inductance cannot be 0 or negative''')
    elif capacitance <= 0:
        raise ValueError('''Capacitance cannot be 0 or negative''')
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
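A sanity check of the formula f = 1 / (2 * pi * sqrt(L * C)); the 10 mH / 100 nF component values are chosen purely for illustration:

from math import isclose, pi, sqrt

L, C = 10e-3, 100e-9            # 10 mH inductor, 100 nF capacitor
f = 1 / (2 * pi * sqrt(L * C))  # resonant frequency of the LC tank
assert isclose(f, 5032.9, rel_tol=1e-3)  # ≈ 5.03 kHz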
8
1
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: snake_case_ = 128 elif "12-12" in model_name: snake_case_ = 12 snake_case_ = 12 elif "14-14" in model_name: snake_case_ = 14 snake_case_ = 14 elif "16-16" in model_name: snake_case_ = 16 snake_case_ = 16 else: raise ValueError('''Model not supported''' ) snake_case_ = '''huggingface/label-files''' if "speech-commands" in model_name: snake_case_ = 35 snake_case_ = '''speech-commands-v2-id2label.json''' else: snake_case_ = 527 snake_case_ = '''audioset-id2label.json''' snake_case_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) ) snake_case_ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} return config def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): if "module.v" in name: snake_case_ = name.replace('''module.v''' , '''audio_spectrogram_transformer''' ) if "cls_token" in name: snake_case_ = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "dist_token" in name: snake_case_ = name.replace('''dist_token''' , '''embeddings.distillation_token''' ) if "pos_embed" in name: snake_case_ = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: snake_case_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) # transformer blocks if "blocks" in name: snake_case_ = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: snake_case_ = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: snake_case_ = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: snake_case_ = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: snake_case_ = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: snake_case_ = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: snake_case_ = name.replace('''mlp.fc2''' , '''output.dense''' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: snake_case_ = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' ) # classifier head if "module.mlp_head.0" in name: snake_case_ = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' ) if "module.mlp_head.1" in name: snake_case_ = name.replace('''module.mlp_head.1''' , '''classifier.dense''' ) return name def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): for key in orig_state_dict.copy().keys(): snake_case_ = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ ) if "qkv" in key: snake_case_ = key.split('''.''' ) snake_case_ = int(key_split[3] ) snake_case_ = config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[dim : dim * 2, :] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] else: snake_case_ = val return orig_state_dict def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ 
): snake_case_ = [ '''module.v.head.weight''', '''module.v.head.bias''', '''module.v.head_dist.weight''', '''module.v.head_dist.bias''', ] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ): snake_case_ = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE__ ) snake_case_ = { '''ast-finetuned-audioset-10-10-0.4593''': ( '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.450''': ( '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448''': ( '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448-v2''': ( '''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1''' ), '''ast-finetuned-audioset-12-12-0.447''': ( '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1''' ), '''ast-finetuned-audioset-14-14-0.443''': ( '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1''' ), '''ast-finetuned-audioset-16-16-0.442''': ( '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1''' ), '''ast-finetuned-speech-commands-v2''': ( '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1''' ), } # load original state_dict snake_case_ = model_name_to_url[model_name] snake_case_ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' ) # remove some keys remove_keys(SCREAMING_SNAKE_CASE__ ) # rename some keys snake_case_ = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # load 🤗 model snake_case_ = ASTForAudioClassification(SCREAMING_SNAKE_CASE__ ) model.eval() model.load_state_dict(SCREAMING_SNAKE_CASE__ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 snake_case_ = -4.267_7393 if '''speech-commands''' not in model_name else -6.84_5978 snake_case_ = 4.568_9974 if '''speech-commands''' not in model_name else 5.565_4526 snake_case_ = 1024 if '''speech-commands''' not in model_name else 128 snake_case_ = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ ) if "speech-commands" in model_name: snake_case_ = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' ) snake_case_ = dataset[0]['''audio''']['''array'''] else: snake_case_ = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , ) snake_case_, snake_case_ = torchaudio.load(SCREAMING_SNAKE_CASE__ ) snake_case_ = waveform.squeeze().numpy() snake_case_ = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=16000 , return_tensors='''pt''' ) # forward pass snake_case_ = model(**SCREAMING_SNAKE_CASE__ ) snake_case_ = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": snake_case_ = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": snake_case_ = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": snake_case_ = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": snake_case_ = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif 
model_name == "ast-finetuned-audioset-12-12-0.447": snake_case_ = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": snake_case_ = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": snake_case_ = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": snake_case_ = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError('''Unknown model name''' ) if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ): raise ValueError('''Logits don\'t match''' ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: print('''Pushing model and feature extractor to the hub...''' ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''ast-finetuned-audioset-10-10-0.4593''', type=str, help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCAmelCase_ = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
8
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): return x + 2 class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''x = 3''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3} ) snake_case_ = '''x = y''' snake_case_ = {'''y''': 5} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 5, '''y''': 5} ) def snake_case__( self : Dict ) ->Optional[int]: snake_case_ = '''y = add_two(x)''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) # Won't work without the tool with CaptureStdout() as out: snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result is None assert "tried to execute add_two" in out.out def snake_case__( self : Union[str, Any] ) ->Dict: snake_case_ = '''x = 3''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3} ) def snake_case__( self : Optional[int] ) ->Optional[int]: snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def snake_case__( self : Dict ) ->str: snake_case_ = '''x = 3\ny = 5''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) def snake_case__( self : str ) ->Tuple: snake_case_ = '''text = f\'This is x: {x}.\'''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''text''': '''This is x: 3.'''} ) def snake_case__( self : Optional[Any] ) ->List[str]: snake_case_ = '''if x <= 3:\n y = 2\nelse:\n y = 5''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 2} ) snake_case_ = {'''x''': 8} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 8, '''y''': 5} ) def snake_case__( self : str ) ->str: snake_case_ = '''test_list = [x, add_two(x)]''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , [3, 5] ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} ) def snake_case__( self : Any ) ->List[Any]: snake_case_ = '''y = x''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 3} ) def snake_case__( self : Optional[int] ) ->Dict: snake_case_ = '''test_list = [x, add_two(x)]\ntest_list[1]''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} ) snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''x = 0\nfor i in range(3):\n x = i''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {'''range''': range} , state=_UpperCamelCase ) assert result == 2 self.assertDictEqual(_UpperCamelCase , {'''x''': 2, '''i''': 2} )
8
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: lowerCAmelCase_ = None lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase_ = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''', }, } lowerCAmelCase_ = { '''albert-base-v1''': 5_12, '''albert-large-v1''': 5_12, '''albert-xlarge-v1''': 5_12, '''albert-xxlarge-v1''': 5_12, '''albert-base-v2''': 5_12, '''albert-large-v2''': 5_12, '''albert-xlarge-v2''': 5_12, '''albert-xxlarge-v2''': 5_12, } lowerCAmelCase_ = '''▁''' class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Dict = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : List[Any] = AlbertTokenizer def __init__( self : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : str=None , _UpperCamelCase : Tuple=True , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : Union[str, Any]=False , _UpperCamelCase : str="[CLS]" , _UpperCamelCase : Dict="[SEP]" , _UpperCamelCase : Tuple="<unk>" , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : Optional[int]="<pad>" , _UpperCamelCase : Tuple="[CLS]" , _UpperCamelCase : int="[MASK]" , **_UpperCamelCase : Tuple , ) ->Tuple: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
snake_case_ = ( AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase , normalized=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token ) super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , remove_space=_UpperCamelCase , keep_accents=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , **_UpperCamelCase , ) snake_case_ = do_lower_case snake_case_ = remove_space snake_case_ = keep_accents snake_case_ = vocab_file snake_case_ = False if not self.vocab_file else True def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def snake_case__( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case__( self : str , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ): copyfile(self.vocab_file , _UpperCamelCase ) return (out_vocab_file,)
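The two sequence-pair helpers above implement the standard ALBERT layout ([CLS] A [SEP] B [SEP], with segment ids 0 then 1); a sketch of what they produce, assuming the public transformers API and access to the albert-base-v2 checkpoint:

from transformers import AlbertTokenizerFast  # assumes transformers is installed

tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
enc = tok("first sentence", "second sentence")
print(tok.convert_ids_to_tokens(enc["input_ids"]))  # [CLS] ... [SEP] ... [SEP]
print(enc["token_type_ids"])                        # 0s for segment A, 1s for segment B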
8
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Tuple ) ->List[Any]: return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_UpperCamelCase ) for s in shape] )}.npy''' def snake_case__( self : Any ) ->List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() def snake_case__( self : int , _UpperCamelCase : Union[str, Any]=0 , _UpperCamelCase : int=(4, 4, 6_4, 6_4) , _UpperCamelCase : Optional[int]=False ) ->Tuple: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase , _UpperCamelCase ) ) , dtype=_UpperCamelCase ) return image def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Optional[int]="CompVis/stable-diffusion-v1-4" ) ->Optional[Any]: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = '''bf16''' if fpaa else None snake_case_, snake_case_ = FlaxUNetaDConditionModel.from_pretrained( _UpperCamelCase , subfolder='''unet''' , dtype=_UpperCamelCase , revision=_UpperCamelCase ) return model, params def snake_case__( self : Dict , _UpperCamelCase : List[Any]=0 , _UpperCamelCase : Tuple=(4, 7_7, 7_6_8) , _UpperCamelCase : List[Any]=False ) ->int: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase , _UpperCamelCase ) ) , dtype=_UpperCamelCase ) return hidden_states @parameterized.expand( [ # fmt: off [8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) ->Union[str, Any]: snake_case_, snake_case_ = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=_UpperCamelCase ) snake_case_ = self.get_latents(_UpperCamelCase , fpaa=_UpperCamelCase ) snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , fpaa=_UpperCamelCase ) snake_case_ = model.apply( {'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample assert sample.shape == latents.shape snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def 
snake_case__( self : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str ) ->Dict: snake_case_, snake_case_ = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=_UpperCamelCase ) snake_case_ = self.get_latents(_UpperCamelCase , shape=(4, 4, 9_6, 9_6) , fpaa=_UpperCamelCase ) snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , shape=(4, 7_7, 1_0_2_4) , fpaa=_UpperCamelCase ) snake_case_ = model.apply( {'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample assert sample.shape == latents.shape snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 )
8
1
def __SCREAMING_SNAKE_CASE(numbers):
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('''numbers must be an iterable of integers''')

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
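Two quick checks of the routine on classic inputs, with the expected answers worked out by hand:

# [2, 3, -2, 4]: the best subarray is [2, 3] with product 6.
assert __SCREAMING_SNAKE_CASE([2, 3, -2, 4]) == 6
# [-2, 0, -1]: every subarray containing both negatives also contains 0, so 0 wins.
assert __SCREAMING_SNAKE_CASE([-2, 0, -1]) == 0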
8
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception):
    _statements = [
        '''CUDA out of memory.''',  # CUDA OOM
        '''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''',  # CUDNN SNAFU
        '''DefaultCPUAllocator: can\'t allocate memory''',  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ''', '''.join([f'''{arg}={value}''' for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
                f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`'''
            )
        while True:
            if batch_size == 0:
                raise RuntimeError('''No executable batch size found, reached zero.''')
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
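This mirrors accelerate's find_executable_batch_size utility; a sketch of the canonical usage, where the decorated function takes the batch size as its first argument and is then called without it:

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # Build dataloaders/models sized by batch_size here; an OOM inside this body
    # triggers a retry with the batch size halved (128 -> 64 -> 32 -> ...).
    print(f"trying batch_size={batch_size}")


train()  # no batch_size argument: the decorator injects it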
8
1
from collections.abc import Callable

import numpy as np


def __SCREAMING_SNAKE_CASE(
    ode_func: Callable, ya: float, xa: float, x_end: float, step_size: float
) -> np.ndarray:
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
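A quick check of the explicit Euler stepper above on dy/dx = y with y(0) = 1, whose exact solution is e^x; the alias is only for readability:

explicit_euler = __SCREAMING_SNAKE_CASE  # readability alias for the function above

ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 1.0, 0.001)
# With h = 0.001 the endpoint should sit near e ≈ 2.71828 (first-order accuracy).
assert abs(ys[-1] - 2.71828) < 0.01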
8
from __future__ import annotations


def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input('''-> ''').strip().lower())
    print('''Encoded: ''', encoded)
    print('''Decoded:''', decode(encoded))


if __name__ == "__main__":
    main()
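A round-trip check of the a=1 ... z=26 mapping (lowercase letters only, per the .lower() call above):

assert encode("abc") == [1, 2, 3]          # 'a' -> 1, 'b' -> 2, 'c' -> 3
assert decode([8, 9]) == "hi"              # the inverse mapping
assert decode(encode("hello")) == "hello"  # round trip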
8
1
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class snake_case_ : '''simple docstring''' def __init__( self : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str]=1_3 , _UpperCamelCase : int=3_2 , _UpperCamelCase : Optional[Any]=2 , _UpperCamelCase : Any=3 , _UpperCamelCase : Optional[int]=1_6 , _UpperCamelCase : Optional[Any]=[1, 2, 1] , _UpperCamelCase : Tuple=[2, 2, 4] , _UpperCamelCase : List[Any]=2 , _UpperCamelCase : Optional[int]=2.0 , _UpperCamelCase : Any=True , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : int=0.0 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : Union[str, Any]=False , _UpperCamelCase : List[str]=True , _UpperCamelCase : Optional[Any]=0.02 , _UpperCamelCase : List[Any]=1e-5 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : int=None , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Union[str, Any]=1_0 , _UpperCamelCase : int=8 , _UpperCamelCase : Tuple=["stage1", "stage2", "stage3"] , _UpperCamelCase : Union[str, Any]=[1, 2, 3] , ) ->Any: snake_case_ = parent snake_case_ = batch_size snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = embed_dim snake_case_ = depths snake_case_ = num_heads snake_case_ = window_size snake_case_ = mlp_ratio snake_case_ = qkv_bias snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = drop_path_rate snake_case_ = hidden_act snake_case_ = use_absolute_embeddings snake_case_ = patch_norm snake_case_ = layer_norm_eps snake_case_ = initializer_range snake_case_ = is_training snake_case_ = scope snake_case_ = use_labels snake_case_ = type_sequence_label_size snake_case_ = encoder_stride snake_case_ = out_features snake_case_ = out_indices def snake_case__( self : Tuple ) ->List[Any]: snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = self.get_config() return config, pixel_values, labels def snake_case__( self : List[Any] ) ->Dict: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def snake_case__( self : int 
, _UpperCamelCase : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Any ) ->Any: snake_case_ = MaskFormerSwinModel(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ = model(_UpperCamelCase ) snake_case_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def snake_case__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : Tuple , _UpperCamelCase : Tuple ) ->Tuple: snake_case_ = MaskFormerSwinBackbone(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ = model(_UpperCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] ) # verify ValueError with self.parent.assertRaises(_UpperCamelCase ): snake_case_ = ['''stem'''] snake_case_ = MaskFormerSwinBackbone(config=_UpperCamelCase ) def snake_case__( self : Tuple ) ->List[Any]: snake_case_ = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ = config_and_inputs snake_case_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class snake_case_ ( __A , __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE : List[Any] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} SCREAMING_SNAKE_CASE : List[Any] = False SCREAMING_SNAKE_CASE : Optional[int] = False SCREAMING_SNAKE_CASE : Union[str, Any] = False SCREAMING_SNAKE_CASE : Dict = False SCREAMING_SNAKE_CASE : List[Any] = False def snake_case__( self : str ) ->Tuple: snake_case_ = MaskFormerSwinModelTester(self ) snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , embed_dim=3_7 ) @require_torch_multi_gpu @unittest.skip( reason=( '''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with''' ''' `nn.DataParallel`''' ) ) def snake_case__( self : List[str] ) ->List[str]: pass def snake_case__( self : int ) ->List[str]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case__( self : int ) ->str: return def snake_case__( self : Optional[Any] ) ->Tuple: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def snake_case__( self : str ) ->Union[str, Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCamelCase ) @unittest.skip('''Swin does not use inputs_embeds''' ) def snake_case__( self : List[str] ) ->Tuple: pass @unittest.skip('''Swin does not support feedforward chunking''' ) def snake_case__( self : List[Any] ) ->str: pass def snake_case__( self : Optional[int] ) ->List[Any]: snake_case_, 
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(_UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) ) def snake_case__( self : Optional[int] ) ->Union[str, Any]: snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(_UpperCamelCase ) snake_case_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ = [*signature.parameters.keys()] snake_case_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _UpperCamelCase ) @unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' ) def snake_case__( self : Optional[Any] ) ->List[str]: pass @unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' ) def snake_case__( self : int ) ->Optional[Any]: pass def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] ) ->List[Any]: snake_case_ = model_class(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() with torch.no_grad(): snake_case_ = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) ) snake_case_ = outputs.hidden_states snake_case_ = getattr( self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase ) # Swin has a different seq_length snake_case_ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def snake_case__( self : List[str] ) ->Optional[int]: snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case_ = True self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ = True self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) def snake_case__( self : str ) ->Any: snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = 3 snake_case_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case_ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case_ = True self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase 
, (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ = True self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width) ) @unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' ) def snake_case__( self : List[str] ) ->Dict: pass @unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' ) def snake_case__( self : List[Any] ) ->Any: pass @unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' ) def snake_case__( self : List[str] ) ->Optional[Any]: pass def snake_case__( self : Optional[int] ) ->Union[str, Any]: snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_UpperCamelCase : Any ): snake_case_ = 0 return t def check_equivalence(_UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : List[Any]={} ): with torch.no_grad(): snake_case_ = model(**_UpperCamelCase , return_dict=_UpperCamelCase , **_UpperCamelCase ) snake_case_ = model(**_UpperCamelCase , return_dict=_UpperCamelCase , **_UpperCamelCase ).to_tuple() def recursive_check(_UpperCamelCase : Tuple , _UpperCamelCase : List[str] ): if isinstance(_UpperCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_UpperCamelCase , _UpperCamelCase ): recursive_check(_UpperCamelCase , _UpperCamelCase ) elif isinstance(_UpperCamelCase , _UpperCamelCase ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(_UpperCamelCase , _UpperCamelCase ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_UpperCamelCase ) , set_nan_tensor_to_zero(_UpperCamelCase ) , atol=1e-5 ) , msg=( '''Tuple and dict output are not equal. Difference:''' f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' f''' {torch.isnan(_UpperCamelCase ).any()} and `inf`: {torch.isinf(_UpperCamelCase )}. 
Dict has''' f''' `nan`: {torch.isnan(_UpperCamelCase ).any()} and `inf`: {torch.isinf(_UpperCamelCase )}.''' ) , ) recursive_check(_UpperCamelCase , _UpperCamelCase ) for model_class in self.all_model_classes: snake_case_ = model_class(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() snake_case_ = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) snake_case_ = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) check_equivalence(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) snake_case_ = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase ) snake_case_ = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase ) check_equivalence(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) snake_case_ = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) snake_case_ = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) check_equivalence(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , {'''output_hidden_states''': True} ) snake_case_ = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase ) snake_case_ = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase ) check_equivalence(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , {'''output_hidden_states''': True} ) @require_torch class snake_case_ ( unittest.TestCase , __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = (MaskFormerSwinBackbone,) if is_torch_available() else () SCREAMING_SNAKE_CASE : int = MaskFormerSwinConfig def snake_case__( self : Any ) ->Union[str, Any]: snake_case_ = MaskFormerSwinModelTester(self ) def snake_case__( self : Dict ) ->str: snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = inputs_dict['''pixel_values'''].shape[0] for backbone_class in self.all_model_classes: snake_case_ = backbone_class(_UpperCamelCase ) backbone.to(_UpperCamelCase ) backbone.eval() snake_case_ = backbone(**_UpperCamelCase ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , _UpperCamelCase ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True snake_case_ = backbone(**_UpperCamelCase , output_hidden_states=_UpperCamelCase ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) snake_case_, snake_case_, snake_case_ = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: snake_case_ = backbone(**_UpperCamelCase , output_attentions=_UpperCamelCase ) self.assertIsNotNone(outputs.attentions )
8
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base,
        # so the two powers can be compared without ever computing them.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
        raise AssertionError('''This should never happen''')


if __name__ == "__main__":
    # Main function
    # Read two base/power pairs from input and typecast them to int using map.
    # Here x is the base and y is the power.
    prompt = '''Enter the base and the power separated by a comma: '''
    xa, ya = map(int, input(prompt).split(''','''))
    xb, yb = map(int, input(prompt).split(''','''))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)

    # We check for the largest number
    if resa > resb:
        print('''Largest number is''', xa, '''^''', ya)
    elif resb > resa:
        print('''Largest number is''', xb, '''^''', yb)
    else:
        print('''Both are equal''')
8
1
import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class snake_case_ ( __A ): '''simple docstring''' def snake_case__( self : List[Any] ) ->Any: snake_case_ = tempfile.mkdtemp() snake_case_ = 5 # Realm tok snake_case_ = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''test''', '''question''', '''this''', '''is''', '''the''', '''first''', '''second''', '''third''', '''fourth''', '''fifth''', '''record''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] snake_case_ = os.path.join(self.tmpdirname , '''realm_tokenizer''' ) os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase ) snake_case_ = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) snake_case_ = os.path.join(self.tmpdirname , '''realm_block_records''' ) os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase ) def snake_case__( self : List[str] ) ->RealmTokenizer: return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) ) def snake_case__( self : Any ) ->Union[str, Any]: shutil.rmtree(self.tmpdirname ) def snake_case__( self : List[str] ) ->List[str]: snake_case_ = RealmConfig(num_block_records=self.num_block_records ) return config def snake_case__( self : Any ) ->List[Any]: snake_case_ = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''question''': ['''foo''', '''bar'''], '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']], } ) return dataset def snake_case__( self : List[str] ) ->List[str]: snake_case_ = np.array( [ B'''This is the first record''', B'''This is the second record''', B'''This is the third record''', B'''This is the fourth record''', B'''This is the fifth record''', B'''This is a longer longer longer record''', ] , dtype=_UpperCamelCase , ) return block_records def snake_case__( self : List[str] ) ->Dict: snake_case_ = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def snake_case__( self : Dict ) ->List[str]: snake_case_ = self.get_config() snake_case_ = self.get_dummy_retriever() snake_case_ = retriever.tokenizer snake_case_ = np.array([0, 3] , dtype='''long''' ) snake_case_ = tokenizer(['''Test question'''] ).input_ids snake_case_ = tokenizer( ['''the fourth'''] , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , ).input_ids snake_case_ = config.reader_seq_len snake_case_, snake_case_, snake_case_, snake_case_ = retriever( _UpperCamelCase , _UpperCamelCase , answer_ids=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors='''np''' ) self.assertEqual(len(_UpperCamelCase ) , 2 ) self.assertEqual(len(_UpperCamelCase ) , 2 ) self.assertEqual(len(_UpperCamelCase ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) ) 
self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , ) def snake_case__( self : Optional[int] ) ->str: snake_case_ = self.get_config() snake_case_ = self.get_dummy_retriever() snake_case_ = retriever.tokenizer snake_case_ = np.array([0, 3, 5] , dtype='''long''' ) snake_case_ = tokenizer(['''Test question'''] ).input_ids snake_case_ = tokenizer( ['''the fourth''', '''longer longer'''] , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , ).input_ids snake_case_ = config.reader_seq_len snake_case_, snake_case_, snake_case_, snake_case_ = retriever( _UpperCamelCase , _UpperCamelCase , answer_ids=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors='''np''' ) self.assertEqual([False, True, True] , _UpperCamelCase ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _UpperCamelCase ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _UpperCamelCase ) def snake_case__( self : Any ) ->Dict: snake_case_ = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) # Test local path snake_case_ = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) self.assertEqual(retriever.block_records[0] , B'''This is the first record''' ) # Test mocked remote path with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download: snake_case_ = os.path.join( os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME ) snake_case_ = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' ) self.assertEqual(retriever.block_records[0] , B'''This is the first record''' )
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {'''vocab_file''': '''spiece.model'''}

lowerCAmelCase_ = {
    '''vocab_file''': {
        '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
        '''google/bigbird-roberta-large''': (
            '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
        ),
        '''google/bigbird-base-trivia-itc''': (
            '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
        ),
    }
}

lowerCAmelCase_ = {
    '''google/bigbird-roberta-base''': 40_96,
    '''google/bigbird-roberta-large''': 40_96,
    '''google/bigbird-base-trivia-itc''': 40_96,
}


class snake_case_ ( __A ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE : List[Any] = ["input_ids", "attention_mask"]
    SCREAMING_SNAKE_CASE : List[int] = []

    def __init__( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict="<unk>" , _UpperCamelCase : List[str]="<s>" , _UpperCamelCase : Tuple="</s>" , _UpperCamelCase : Any="<pad>" , _UpperCamelCase : Any="[SEP]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) ->None:
        snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else bos_token
        snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else eos_token
        snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else unk_token
        snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else pad_token
        snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else cls_token
        snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token

        snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , sep_token=_UpperCamelCase , mask_token=_UpperCamelCase , cls_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )

        snake_case_ = vocab_file
        snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(_UpperCamelCase )

    @property
    def snake_case__( self : str ) ->List[Any]:
        return self.sp_model.get_piece_size()

    def snake_case__( self : int ) ->Union[str, Any]:
        snake_case_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : Tuple ) ->Any:
        snake_case_ = self.__dict__.copy()
        snake_case_ = None
        return state

    def __setstate__( self : str , _UpperCamelCase : List[Any] ) ->List[str]:
        snake_case_ = d

        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            snake_case_ = {}

        snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def snake_case__( self : Optional[int] , _UpperCamelCase : str ) ->List[str]:
        return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )

    def snake_case__( self : str , _UpperCamelCase : List[str] ) ->Tuple:
        return self.sp_model.piece_to_id(_UpperCamelCase )

    def snake_case__( self : Union[str, Any] , _UpperCamelCase : str ) ->List[Any]:
        snake_case_ = self.sp_model.IdToPiece(_UpperCamelCase )
        return token

    def snake_case__( self : Dict , _UpperCamelCase : Optional[int] ) ->List[str]:
        snake_case_ = []
        snake_case_ = ''''''
        snake_case_ = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_UpperCamelCase ) + token
                snake_case_ = True
                snake_case_ = []
            else:
                current_sub_tokens.append(_UpperCamelCase )
                snake_case_ = False
        out_string += self.sp_model.decode(_UpperCamelCase )
        return out_string.strip()

    def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : bool = False , _UpperCamelCase : bool = None , _UpperCamelCase : bool = True , **_UpperCamelCase : List[str] , ) ->str:
        snake_case_ = kwargs.pop('''use_source_tokenizer''' , _UpperCamelCase )

        snake_case_ = self.convert_ids_to_tokens(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        snake_case_ = []
        snake_case_ = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) )
                    snake_case_ = []
                sub_texts.append(_UpperCamelCase )
            else:
                current_sub_text.append(_UpperCamelCase )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) )

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            snake_case_ = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(_UpperCamelCase ) )
        else:
            snake_case_ = ''''''.join(_UpperCamelCase )

        snake_case_ = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            snake_case_ = self.clean_up_tokenization(_UpperCamelCase )
            return clean_text
        else:
            return text

    def snake_case__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
        if not os.path.isdir(_UpperCamelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        snake_case_ = os.path.join(
            _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _UpperCamelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(_UpperCamelCase , '''wb''' ) as fi:
                snake_case_ = self.sp_model.serialized_model_proto()
                fi.write(_UpperCamelCase )

        return (out_vocab_file,)

    def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        snake_case_ = [self.cls_token_id]
        snake_case_ = [self.sep_token_id]
        return cls + token_ids_a + sep + token_ids_a + sep

    def snake_case__( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )

        if token_ids_a is None:
            return [1] + ([0] * len(_UpperCamelCase )) + [1]
        return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]

    def snake_case__( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
        snake_case_ = [self.sep_token_id]
        snake_case_ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
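For reference, the class above mirrors `transformers.BigBirdTokenizer`. A short usage sketch (assumes `transformers` plus `sentencepiece` are installed and the Hub checkpoint is reachable):

from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
ids = tokenizer("Hello world").input_ids  # wrapped as [CLS] ... [SEP]
text = tokenizer.decode(ids)  # per the regex above, no space is re-inserted before [SEP] or [MASK]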
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


lowerCAmelCase_ = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = ['''XGLMTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = ['''XGLMTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XGLMForCausalLM''',
        '''XGLMModel''',
        '''XGLMPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        '''FlaxXGLMForCausalLM''',
        '''FlaxXGLMModel''',
        '''FlaxXGLMPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXGLMForCausalLM''',
        '''TFXGLMModel''',
        '''TFXGLMPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
from __future__ import annotations

from collections.abc import Generator


def __SCREAMING_SNAKE_CASE ():
    snake_case_ = {}
    snake_case_ = 2
    while True:
        snake_case_ = factor_map.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        if factor:
            snake_case_ = factor + prime
            while x in factor_map:
                x += factor
            snake_case_ = factor
        else:
            snake_case_ = prime
            yield prime
        prime += 1


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 1E10 ):
    snake_case_ = sieve()
    snake_case_ = 1
    while True:
        snake_case_ = next(SCREAMING_SNAKE_CASE__ )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(SCREAMING_SNAKE_CASE__ )
        n += 2


if __name__ == "__main__":
    print(solution())
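A minimal de-obfuscated sketch of the same incremental sieve, with readable names (`factor_map`, `prime`, and friends are labels chosen here, not taken from the file above), to show how the generator behaves:

def incremental_sieve():
    factor_map = {}  # maps an upcoming composite to one of its prime factors
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: reschedule `factor` at its next free multiple
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` really is prime: its square is the first composite it produces
            factor_map[prime * prime] = prime
            yield prime
        prime += 1

gen = incremental_sieve()
assert [next(gen) for _ in range(8)] == [2, 3, 5, 7, 11, 13, 17, 19]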
import copy
import random

from transformers import CLIPTokenizer


class snake_case_ ( __A ):
    '''simple docstring'''

    def __init__( self : int , *_UpperCamelCase : int , **_UpperCamelCase : int ) ->Dict:
        super().__init__(*_UpperCamelCase , **_UpperCamelCase )
        snake_case_ = {}

    def snake_case__( self : Optional[Any] , _UpperCamelCase : Optional[Any] , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : List[str] ) ->Any:
        snake_case_ = super().add_tokens(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase )
        if num_added_tokens == 0:
            raise ValueError(
                f'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
                ''' `placeholder_token` that is not already in the tokenizer.''' )

    def snake_case__( self : Union[str, Any] , _UpperCamelCase : Any , *_UpperCamelCase : Dict , _UpperCamelCase : Optional[Any]=1 , **_UpperCamelCase : Optional[Any] ) ->Any:
        snake_case_ = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase )
            output.append(_UpperCamelCase )
        else:
            snake_case_ = []
            for i in range(_UpperCamelCase ):
                snake_case_ = placeholder_token + f'''_{i}'''
                self.try_adding_tokens(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase )
                output.append(_UpperCamelCase )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f'''The tokenizer already has placeholder token {token} that can get confused with'''
                    f''' {placeholder_token}; keep placeholder tokens independent''' )
        snake_case_ = output

    def snake_case__( self : List[str] , _UpperCamelCase : Tuple , _UpperCamelCase : Dict=False , _UpperCamelCase : Union[str, Any]=1.0 ) ->Optional[Any]:
        if isinstance(_UpperCamelCase , _UpperCamelCase ):
            snake_case_ = []
            for i in range(len(_UpperCamelCase ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=_UpperCamelCase ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                snake_case_ = self.token_map[placeholder_token]
                snake_case_ = tokens[: 1 + int(len(_UpperCamelCase ) * prop_tokens_to_load )]
                if vector_shuffle:
                    snake_case_ = copy.copy(_UpperCamelCase )
                    random.shuffle(_UpperCamelCase )
                snake_case_ = text.replace(_UpperCamelCase , ''' '''.join(_UpperCamelCase ) )
        return text

    def __call__( self : List[str] , _UpperCamelCase : Any , *_UpperCamelCase : int , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Tuple=1.0 , **_UpperCamelCase : Optional[int] ) ->Optional[int]:
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                _UpperCamelCase , vector_shuffle=_UpperCamelCase , prop_tokens_to_load=_UpperCamelCase ) ,
            *_UpperCamelCase ,
            **_UpperCamelCase , )

    def snake_case__( self : str , _UpperCamelCase : Any , *_UpperCamelCase : Any , _UpperCamelCase : Union[str, Any]=False , _UpperCamelCase : List[Any]=1.0 , **_UpperCamelCase : Any ) ->int:
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                _UpperCamelCase , vector_shuffle=_UpperCamelCase , prop_tokens_to_load=_UpperCamelCase ) ,
            *_UpperCamelCase ,
            **_UpperCamelCase , )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


lowerCAmelCase_ = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''OPTForCausalLM''',
        '''OPTModel''',
        '''OPTPreTrainedModel''',
        '''OPTForSequenceClassification''',
        '''OPTForQuestionAnswering''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        '''FlaxOPTForCausalLM''',
        '''FlaxOPTModel''',
        '''FlaxOPTPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    return int((input_a, input_a).count(0 ) == 0 )


def __SCREAMING_SNAKE_CASE ():
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
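The tuple-count trick above returns 1 only when neither input is 0; an equivalent and perhaps more familiar formulation (helper name chosen here, not from the file):

def and_gate_alt(input_a: int, input_b: int) -> int:
    return int(bool(input_a) and bool(input_b))

# For 0/1 inputs this agrees with the bitwise AND of the two values.
assert all(and_gate_alt(a, b) == (a & b) for a in (0, 1) for b in (0, 1))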
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool


class snake_case_ ( __A ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE : Optional[Any] = "philschmid/bart-large-cnn-samsum"
    SCREAMING_SNAKE_CASE : Tuple = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    SCREAMING_SNAKE_CASE : str = "summarizer"
    SCREAMING_SNAKE_CASE : str = AutoTokenizer
    SCREAMING_SNAKE_CASE : str = AutoModelForSeqaSeqLM
    SCREAMING_SNAKE_CASE : Optional[int] = ["text"]
    SCREAMING_SNAKE_CASE : Optional[int] = ["text"]

    def snake_case__( self : str , _UpperCamelCase : int ) ->Optional[int]:
        return self.pre_processor(_UpperCamelCase , return_tensors='''pt''' , truncation=_UpperCamelCase )

    def snake_case__( self : Tuple , _UpperCamelCase : Optional[int] ) ->Tuple:
        return self.model.generate(**_UpperCamelCase )[0]

    def snake_case__( self : Optional[Any] , _UpperCamelCase : Optional[int] ) ->Any:
        return self.pre_processor.decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


lowerCAmelCase_ = {
    '''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = ['''LlamaTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = ['''LlamaTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        '''LlamaForCausalLM''',
        '''LlamaModel''',
        '''LlamaPreTrainedModel''',
        '''LlamaForSequenceClassification''',
    ]


if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from collections import deque

from .hash_table import HashTable


class snake_case_ ( __A ):
    '''simple docstring'''

    def __init__( self : int , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) ->Tuple:
        super().__init__(*_UpperCamelCase , **_UpperCamelCase )

    def snake_case__( self : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Dict ) ->Tuple:
        snake_case_ = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(_UpperCamelCase )
        snake_case_ = self.values[key]

    def snake_case__( self : List[Any] ) ->str:
        return (
            sum(self.charge_factor - len(_UpperCamelCase ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )

    def snake_case__( self : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int]=None ) ->str:
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(_UpperCamelCase ) == 0
        ):
            return key
        return super()._collision_resolution(_UpperCamelCase , _UpperCamelCase )
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {
    '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class snake_case_ ( __A ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE : List[str] = "canine"

    def __init__( self : str , _UpperCamelCase : Optional[Any]=7_6_8 , _UpperCamelCase : Optional[int]=1_2 , _UpperCamelCase : int=1_2 , _UpperCamelCase : Tuple=3_0_7_2 , _UpperCamelCase : Tuple="gelu" , _UpperCamelCase : Optional[Any]=0.1 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Dict=1_6_3_8_4 , _UpperCamelCase : List[Any]=1_6 , _UpperCamelCase : Tuple=0.02 , _UpperCamelCase : Optional[Any]=1e-12 , _UpperCamelCase : Union[str, Any]=0 , _UpperCamelCase : Union[str, Any]=0xe_0_0_0 , _UpperCamelCase : Dict=0xe_0_0_1 , _UpperCamelCase : Any=4 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : List[str]=8 , _UpperCamelCase : Any=1_6_3_8_4 , _UpperCamelCase : List[str]=1_2_8 , **_UpperCamelCase : str , ) ->List[Any]:
        super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )

        snake_case_ = max_position_embeddings
        snake_case_ = hidden_size
        snake_case_ = num_hidden_layers
        snake_case_ = num_attention_heads
        snake_case_ = intermediate_size
        snake_case_ = hidden_act
        snake_case_ = hidden_dropout_prob
        snake_case_ = attention_probs_dropout_prob
        snake_case_ = initializer_range
        snake_case_ = type_vocab_size
        snake_case_ = layer_norm_eps

        # Character config:
        snake_case_ = downsampling_rate
        snake_case_ = upsampling_kernel_size
        snake_case_ = num_hash_functions
        snake_case_ = num_hash_buckets
        snake_case_ = local_transformer_stride
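For reference, the upstream class this mirrors is `transformers.CanineConfig`, and the defaults above can be checked directly (a sketch assuming a transformers version with CANINE support is installed):

from transformers import CanineConfig

config = CanineConfig()  # all defaults, as listed in the signature above
assert config.hidden_size == 768
assert config.downsampling_rate == 4
assert config.num_hash_buckets == 16384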
from __future__ import annotations


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
    snake_case_ = len(SCREAMING_SNAKE_CASE__ )
    # We need to create solution object to save path.
    snake_case_ = [[0 for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )]
    snake_case_ = run_maze(SCREAMING_SNAKE_CASE__ , 0 , 0 , SCREAMING_SNAKE_CASE__ )
    if solved:
        print('''\n'''.join(str(SCREAMING_SNAKE_CASE__ ) for row in solutions ) )
    else:
        print('''No solution exists!''' )
    return solved


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    snake_case_ = len(SCREAMING_SNAKE_CASE__ )
    # Final check point.
    if i == j == (size - 1):
        snake_case_ = 1
        return True

    snake_case_ = (not i < 0) and (not j < 0)  # Check lower bounds
    snake_case_ = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        snake_case_ = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            snake_case_ = 1

            # check for directions
            if (
                run_maze(SCREAMING_SNAKE_CASE__ , i + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                or run_maze(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j + 1 , SCREAMING_SNAKE_CASE__ )
                or run_maze(SCREAMING_SNAKE_CASE__ , i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                or run_maze(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j - 1 , SCREAMING_SNAKE_CASE__ )
            ):
                return True

            snake_case_ = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    while second != 0:
        snake_case_ = first & second
        first ^= second
        snake_case_ = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lowerCAmelCase_ = int(input('''Enter the first number: ''').strip())
    lowerCAmelCase_ = int(input('''Enter the second number: ''').strip())
    print(f"""{add(first, second) = }""")
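A quick sanity check of the carry-propagation idea (helper name `bitwise_add` chosen here): `first & second` collects the carry bits, `first ^ second` adds without carries, and the loop repeats until no carries remain. This sketch assumes non-negative integers.

def bitwise_add(first: int, second: int) -> int:
    while second != 0:
        carry = first & second  # positions where both operands have a 1 bit
        first ^= second         # sum without carries
        second = carry << 1     # carries move one position to the left
    return first

assert bitwise_add(3, 5) == 8
assert bitwise_add(0, 13) == 13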
from decimal import Decimal, getcontext
from math import ceil, factorial


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
    if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        raise TypeError('''Undefined for non-integers''' )
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''' )

    snake_case_ = precision
    snake_case_ = ceil(precision / 14 )
    snake_case_ = 426880 * Decimal(10005 ).sqrt()
    snake_case_ = 1
    snake_case_ = 13591409
    snake_case_ = Decimal(SCREAMING_SNAKE_CASE__ )
    for k in range(1 , SCREAMING_SNAKE_CASE__ ):
        snake_case_ = factorial(6 * k ) // (factorial(3 * k ) * factorial(SCREAMING_SNAKE_CASE__ ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]


if __name__ == "__main__":
    lowerCAmelCase_ = 50
    print(f"""The first {n} digits of pi is: {pi(n)}""")
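The Chudnovsky series above converges at roughly 14 decimal digits per term, which is why the loop runs ceil(precision / 14) iterations; the final digit is dropped because it may be affected by rounding. A de-obfuscated sketch with readable names (chosen here, not taken from the file):

from decimal import Decimal, getcontext
from math import ceil, factorial

def chudnovsky_pi(precision: int) -> str:
    getcontext().prec = precision
    num_terms = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_terms):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]

assert chudnovsky_pi(10) == "3.14159265"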
import pytest

import datasets


# Import fixture modules as plugins
lowerCAmelCase_ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
            continue
        item.add_marker(pytest.mark.unit )


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
    config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )


@pytest.fixture(autouse=SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    snake_case_ = tmp_path_factory.getbasetemp() / '''cache'''
    snake_case_ = test_hf_cache_home / '''datasets'''
    snake_case_ = test_hf_cache_home / '''metrics'''
    snake_case_ = test_hf_cache_home / '''modules'''
    monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(SCREAMING_SNAKE_CASE__ ) )
    monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(SCREAMING_SNAKE_CASE__ ) )
    monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(SCREAMING_SNAKE_CASE__ ) )
    snake_case_ = test_hf_datasets_cache / '''downloads'''
    monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(SCREAMING_SNAKE_CASE__ ) )
    snake_case_ = test_hf_datasets_cache / '''downloads''' / '''extracted'''
    monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(SCREAMING_SNAKE_CASE__ ) )


@pytest.fixture(autouse=SCREAMING_SNAKE_CASE__ , scope='''session''' )
def __SCREAMING_SNAKE_CASE ():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
    # don't take tests into account when counting downloads
    monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , SCREAMING_SNAKE_CASE__ )


@pytest.fixture
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , SCREAMING_SNAKE_CASE__ )
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class snake_case_ ( __A ):
    '''simple docstring'''

    def __init__( self : int , _UpperCamelCase : pyspark.sql.DataFrame , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : bool = True , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : str = None , _UpperCamelCase : bool = True , _UpperCamelCase : str = "arrow" , **_UpperCamelCase : Tuple , ) ->str:
        super().__init__(
            split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , **_UpperCamelCase , )
        snake_case_ = load_from_cache_file
        snake_case_ = file_format
        snake_case_ = Spark(
            df=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , working_dir=_UpperCamelCase , **_UpperCamelCase , )

    def snake_case__( self : int ) ->Tuple:
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        snake_case_ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=_UpperCamelCase , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
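A hedged usage sketch: upstream, this reader backs `Dataset.from_spark`, so the public entry point looks roughly like the following (requires a working `pyspark` installation; the session setup and column names here are illustrative):

from datasets import Dataset
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("foo", 0), ("bar", 1)], ["text", "label"])
ds = Dataset.from_spark(df)  # materializes the Spark DataFrame as a datasets.Dataset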
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase_ = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
    config.addinivalue_line(
        '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
    config.addinivalue_line(
        '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
    config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
    config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
    config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
    config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(SCREAMING_SNAKE_CASE__ )


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
    from transformers.testing_utils import pytest_terminal_summary_main

    snake_case_ = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(SCREAMING_SNAKE_CASE__ , id=SCREAMING_SNAKE_CASE__ )


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        snake_case_ = 0


# Doctest custom flag to ignore output.
lowerCAmelCase_ = doctest.register_optionflag('''IGNORE_RESULT''')

lowerCAmelCase_ = doctest.OutputChecker


class snake_case_ ( __A ):
    '''simple docstring'''

    def snake_case__( self : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any] ) ->int:
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )


lowerCAmelCase_ = CustomOutputChecker
lowerCAmelCase_ = HfDoctestModule
lowerCAmelCase_ = HfDocTestParser
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


lowerCAmelCase_ = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = ['''DPTFeatureExtractor''']
    lowerCAmelCase_ = ['''DPTImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        '''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''DPTForDepthEstimation''',
        '''DPTForSemanticSegmentation''',
        '''DPTModel''',
        '''DPTPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=5 ):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count('''<mask>''' ) == 1
    snake_case_ = torch.tensor(tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) ).unsqueeze(0 )  # Batch size 1
    snake_case_ = model(SCREAMING_SNAKE_CASE__ )[0]  # The last hidden-state is the first element of the output tuple
    snake_case_ = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    snake_case_ = logits[0, masked_index, :]
    snake_case_ = logits.softmax(dim=0 )
    snake_case_, snake_case_ = prob.topk(k=SCREAMING_SNAKE_CASE__ , dim=0 )
    snake_case_ = ''' '''.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(SCREAMING_SNAKE_CASE__ ) )] )
    snake_case_ = tokenizer.mask_token
    snake_case_ = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
        snake_case_ = predicted_token_bpe.replace('''\u2581''' , ''' ''' )
        if " {0}".format(SCREAMING_SNAKE_CASE__ ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(''' {0}'''.format(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs


lowerCAmelCase_ = CamembertTokenizer.from_pretrained('''camembert-base''')
lowerCAmelCase_ = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()

lowerCAmelCase_ = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

lowerCAmelCase_ = {
    '''vocab_file''': {
        '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
    },
    '''tokenizer_file''': {
        '''unc-nlp/lxmert-base-uncased''': (
            '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
        ),
    },
}

lowerCAmelCase_ = {
    '''unc-nlp/lxmert-base-uncased''': 5_12,
}

lowerCAmelCase_ = {
    '''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}


class snake_case_ ( __A ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION
    SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE : Any = LxmertTokenizer

    def __init__( self : Union[str, Any] , _UpperCamelCase : int=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Dict=True , _UpperCamelCase : Any="[UNK]" , _UpperCamelCase : Tuple="[SEP]" , _UpperCamelCase : List[Any]="[PAD]" , _UpperCamelCase : Union[str, Any]="[CLS]" , _UpperCamelCase : str="[MASK]" , _UpperCamelCase : List[str]=True , _UpperCamelCase : List[str]=None , **_UpperCamelCase : List[str] , ) ->Any:
        super().__init__(
            _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )

        snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , _UpperCamelCase ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , _UpperCamelCase ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , _UpperCamelCase ) != tokenize_chinese_chars
        ):
            snake_case_ = getattr(_UpperCamelCase , normalizer_state.pop('''type''' ) )
            snake_case_ = do_lower_case
            snake_case_ = strip_accents
            snake_case_ = tokenize_chinese_chars
            snake_case_ = normalizer_class(**_UpperCamelCase )

        snake_case_ = do_lower_case

    def snake_case__( self : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=None ) ->List[Any]:
        snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]

        return output

    def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
        snake_case_ = [self.sep_token_id]
        snake_case_ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def snake_case__( self : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
        snake_case_ = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
        return tuple(_UpperCamelCase )
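A pure-Python illustration of the special-token layout implemented above (helper name and ids are illustrative; no model download needed): single sequences become [CLS] A [SEP], pairs become [CLS] A [SEP] B [SEP], and token_type_ids are 0 over the first segment and 1 over the second.

def build_inputs(cls_id, sep_id, ids_a, ids_b=None):
    output = [cls_id] + ids_a + [sep_id]  # [CLS] A [SEP]
    if ids_b:
        output += ids_b + [sep_id]        # ... B [SEP]
    return output

assert build_inputs(101, 102, [7, 8]) == [101, 7, 8, 102]
assert build_inputs(101, 102, [7, 8], [9]) == [101, 7, 8, 102, 9, 102]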