from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
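

# A minimal, standalone sketch of the lazy-import idea used by `_LazyModule`
# above, written with PEP 562 module-level __getattr__. This is illustrative
# only (not the transformers implementation); the attribute-to-module mapping
# below is a made-up example.
import importlib

_LAZY_ATTRS = {"AutoformerConfig": "configuration_autoformer"}


def __getattr__(name):
    # The submodule is only imported the first time the attribute is accessed.
    if name in _LAZY_ATTRS:
        module = importlib.import_module("." + _LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")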
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2],
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implemented
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        fp16: bool = True,
        quality_checks: bool = True,
    ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
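

# Illustrative note (not part of the original test): `get_launcher` returns a plain
# argv prefix, e.g. get_launcher(distributed=False) -> ["deepspeed", "--num_nodes", "1", "--num_gpus", "1"],
# which is then concatenated with the script path and its CLI arguments before being
# handed to execute_subprocess_async.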
'''simple docstring'''
def is_num_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same forwards and backwards.

    >>> is_num_palindrome(121)
    True
    >>> is_num_palindrome(10)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
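

# A minimal cross-check of the arithmetic digit reversal above (illustrative,
# not part of the original file): for non-negative integers it agrees with the
# simpler string-reversal formulation.
def is_num_palindrome_str(num: int) -> bool:
    return num >= 0 and str(num) == str(num)[::-1]


assert all(is_num_palindrome_str(n) == is_num_palindrome(n) for n in (0, 7, 10, 121, 1221, 12321, -121))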
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
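

# Quick illustrative sanity check of the derivation above (usage sketch, not part
# of the original file): hidden_size = embed_dim * 2**(num_stages - 1).
if __name__ == "__main__":
    config = Swinv2Config()
    assert config.hidden_size == 96 * 2 ** 3 == 768
    print(config.model_type, config.hidden_size)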
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """
    Probabilistic primality test (Miller-Rabin style) with `prec` random rounds.
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
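

# Note: Python's built-in three-argument pow() computes exactly this modular
# exponentiation. A minimal sketch of the imported `bin_exp_mod` helper
# (an assumption about its contract: bin_exp_mod(base, exponent, modulus)):
def bin_exp_mod_sketch(base, exponent, modulus):
    result = 1
    base %= modulus
    while exponent > 0:
        if exponent % 2 == 1:  # multiply in the current square when the bit is set
            result = (result * base) % modulus
        base = (base * base) % modulus
        exponent //= 2
    return result


assert bin_exp_mod_sketch(3, 13, 7) == pow(3, 13, 7)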
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    """We will verify our results on an image of cute cats."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak model's weights to our PoolFormer structure."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1- looking for keywords in the first few lines of the file.
    2- counting the occurrences of the words 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
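

# A minimal standalone illustration of the exact-dedup idea used above
# (same md5 hash over whitespace-stripped content; toy strings are made up):
import hashlib as _hashlib
import re as _re

_toy = ["def f():\n    return 1", "def f():\n\treturn 1", "def g(): pass"]
_hashes = {_hashlib.md5(_re.sub(r"\s+", "", s).encode("utf-8")).hexdigest() for s in _toy}
# the first two files differ only in whitespace, so only two unique hashes remain
assert len(_hashes) == 2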
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
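

# Illustrative usage of the ONNX config above (a sketch; a real export would
# additionally need a model and a preprocessor):
if __name__ == "__main__":
    config = YolosConfig()
    onnx_config = YolosOnnxConfig(config)
    print(list(onnx_config.inputs))        # ['pixel_values']
    print(onnx_config.default_onnx_opset)  # 12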
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_lowerCamelCase : Tuple = logging.get_logger(__name__)
# General docstring
_lowerCamelCase : Union[str, Any] = '''ResNetConfig'''
# Base docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[Any] = [1, 2_048, 7, 7]
# Image classification docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[int] = '''tiger cat'''
_lowerCamelCase : str = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 3 , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(
lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=kernel_size // 2 , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def A ( self : Union[str, Any] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def A ( self : Tuple , lowercase : Tensor ):
'''simple docstring'''
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_snake_case = self.embedder(lowercase )
_snake_case = self.pooler(lowercase )
return embedding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase : int , lowercase : int , lowercase : int = 2 ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(lowercase , lowercase , kernel_size=1 , stride=lowercase , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : List[str] , lowercase : List[str] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" , lowercase : int = 4 ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , kernel_size=1 ) , ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , kernel_size=1 , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : Dict , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowercase : ResNetConfig , lowercase : int , lowercase : int , lowercase : int = 2 , lowercase : int = 2 , ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , stride=lowercase , activation=config.hidden_act ) , *[layer(lowercase , lowercase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase , config.depths[1:] ):
self.stages.append(ResNetStage(lowercase , lowercase , lowercase , depth=lowercase ) )
def A ( self : str , lowercase : Tensor , lowercase : bool = False , lowercase : bool = True ):
'''simple docstring'''
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowercase )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowercase , hidden_states=lowercase , )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ResNetConfig
_UpperCAmelCase : Tuple = "resnet"
_UpperCAmelCase : Optional[Any] = "pixel_values"
_UpperCAmelCase : Dict = True
def A ( self : List[str] , lowercase : Dict ):
'''simple docstring'''
if isinstance(lowercase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowercase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def A ( self : Tuple , lowercase : List[Any] , lowercase : Optional[Any]=False ):
'''simple docstring'''
if isinstance(lowercase , lowercase ):
_snake_case = value
_lowerCamelCase : str = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCamelCase : int = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.", RESNET_START_DOCSTRING, )
class ResNetModel( ResNetPreTrainedModel ):
    '''simple docstring'''
    def __init__( self : Optional[Any], config : Any ):
        '''simple docstring'''
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward( self : Union[str, Any], pixel_values : Tensor, output_hidden_states : Optional[bool] = None, return_dict : Optional[bool] = None ):
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
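# --- Added example (not part of the original file): a minimal sketch of how the bare
# model above is driven. The checkpoint name "microsoft/resnet-50" is an assumption;
# any ResNet checkpoint with a matching image processor works the same way.
def _example_resnet_model_usage():  # pragma: no cover
    import requests
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, ResNetModel

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
    model = ResNetModel.from_pretrained("microsoft/resnet-50")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # feature map of the last stage: (batch, hidden_sizes[-1], height/32, width/32)
    print(outputs.last_hidden_state.shape)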
@add_start_docstrings(
    "\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ", RESNET_START_DOCSTRING, )
class ResNetForImageClassification( ResNetPreTrainedModel ):
    '''simple docstring'''
    def __init__( self : List[Any], config : int ):
        '''simple docstring'''
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(), )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward( self : Union[str, Any], pixel_values : Optional[torch.FloatTensor] = None, labels : Optional[torch.LongTensor] = None, output_hidden_states : Optional[bool] = None, return_dict : Optional[bool] = None, ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
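# --- Added example (assumption: mirrors the `problem_type` dispatch in the loss
# branch above, on toy tensors and independent of any model weights).
def _example_problem_type_losses():  # pragma: no cover
    import torch
    from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

    logits = torch.randn(4, 3)
    # single_label_classification: integer class ids -> CrossEntropyLoss
    labels = torch.tensor([0, 2, 1, 0])
    print(CrossEntropyLoss()(logits.view(-1, 3), labels.view(-1)))
    # multi_label_classification: float multi-hot targets -> BCEWithLogitsLoss
    multi_hot = torch.randint(0, 2, (4, 3)).float()
    print(BCEWithLogitsLoss()(logits, multi_hot))
    # regression with num_labels == 1: squeeze both sides -> MSELoss
    print(MSELoss()(torch.randn(4, 1).squeeze(), torch.randn(4).squeeze()))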
@add_start_docstrings(
    "\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ", RESNET_START_DOCSTRING, )
class ResNetBackbone( ResNetPreTrainedModel, BackboneMixin ):
    '''simple docstring'''
    def __init__( self : Tuple, config : Union[str, Any] ):
        '''simple docstring'''
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward( self : Dict, pixel_values : Tensor, output_hidden_states : Optional[bool] = None, return_dict : Optional[bool] = None ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) | 686 | 0 |
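# --- Added example (not part of the original row): a sketch of driving the
# ResNetBackbone defined above. The checkpoint name and stage names are
# assumptions taken from the usual ResNet configuration.
def _example_resnet_backbone_usage():  # pragma: no cover
    import torch
    from transformers import AutoBackbone

    backbone = AutoBackbone.from_pretrained("microsoft/resnet-50", out_features=["stage2", "stage4"])
    pixel_values = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        outputs = backbone(pixel_values)
    for feature_map in outputs.feature_maps:  # one map per requested stage
        print(feature_map.shape)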
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments( parser ):
    """Add arguments used by quantization configuration and calibration."""
    group = parser.add_argument_group('quant_trainer arguments')
    group.add_argument('--wprec', type=int, default=8, help='weight precision')
    group.add_argument('--aprec', type=int, default=8, help='activation precision')
    group.add_argument('--quant-per-tensor', action='store_true', help='per tensor weight scaling')
    group.add_argument('--quant-disable', action='store_true', help='disable all quantizers')
    group.add_argument('--quant-disable-embeddings', action='store_true', help='disable all embeddings quantizers')
    group.add_argument('--quant-disable-keyword', type=str, nargs='+', help='disable quantizers by keyword')
    group.add_argument('--quant-disable-layer-module', type=str, help='disable quantizers by keyword under layer.')
    group.add_argument('--quant-enable-layer-module', type=str, help='enable quantizers by keyword under layer')
    group.add_argument('--calibrator', default='max', help='which quantization range calibrator to use')
    group.add_argument('--percentile', default=None, type=float, help='percentile for PercentileCalibrator')
    group.add_argument('--fuse-qkv', action='store_true', help='use the same scale factor for qkv')
    group.add_argument('--clip-gelu', metavar='N', type=float, help='clip gelu output maximum value to N')
    group.add_argument(
        '--recalibrate-weights', action='store_true', help=(
            'recalibrate weight amaxes by taking the max of the weights.'
            ' amaxes will be computed with the current quantization granularity (axis).'
        ), )
def set_default_quantizers( args ):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = 'max'
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('Specify --percentile when using percentile calibrator')
        calib_method = 'histogram'
    elif args.calibrator == "mse":
        calib_method = 'histogram'
    else:
        raise ValueError(f'''Invalid calibrator {args.calibrator}''')
    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
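# --- Added example (assumption): what the two descriptors built above mean in
# isolation. Activations get one scale for the whole tensor; weights get one scale
# per output channel when --quant-per-tensor is not set (axis=(0,)).
def _example_quant_descriptors():  # pragma: no cover
    act_desc = QuantDescriptor(num_bits=8, calib_method='histogram')
    wgt_desc = QuantDescriptor(num_bits=8, axis=(0,))
    quant_nn.QuantLinear.set_default_quant_desc_input(act_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(wgt_desc)
    layer = quant_nn.QuantLinear(16, 32)  # picks up the defaults set above
    print(layer)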
def configure_model( model, args, calib=False, eval=False ):
    """Function called before the training loop."""
    logger.info('Configuring Model for Quantization')
    logger.info(f'''using quantization package {pytorch_quantization.__file__}''')
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ['embeddings'], which='weight', _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [''], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [R'layer.\d+.' + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [R'layer.\d+.' + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model)
def enable_calibration( model ):
    """Enable calibration of all *_quantizer modules in model."""
    logger.info('Enabling Calibration')
    for name, module in model.named_modules():
        if name.endswith('_quantizer'):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f'''{name:80}: {module}''')
def finish_calibration( model, args ):
    """Disable calibration and load amax for all *_quantizer modules in model."""
    logger.info('Loading calibrated amax')
    for name, module in model.named_modules():
        if name.endswith('_quantizer'):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax('percentile', percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
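# --- Added example (hypothetical driver, not in the original file): the typical
# calibrate-then-quantize sequence using the two helpers above. `data_loader` is
# assumed to yield dicts of model inputs.
def _example_calibration_loop(model, data_loader, args):  # pragma: no cover
    enable_calibration(model)  # quantizers collect statistics, no fake-quant yet
    with torch.no_grad():
        for step, batch in enumerate(data_loader):
            model(**batch)
            if step >= 10:  # a handful of batches is usually enough for max/percentile
                break
    finish_calibration(model, args)  # freeze amax values and re-enable quantization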
def fuse_qkv( model, args ):
    """Use the same scale factor for the q, k and v projections of each attention block."""
    def fusea( qq, qk, qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, '_amax'):
                print('          WARNING: NO AMAX BUFFER')
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f'''          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''')
    for name, mod in model.named_modules():
        if name.endswith('.attention.self'):
            logger.info(f'''FUSE_QKV: {name:{name_width}}''')
            fusea(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu( model, maxval ):
    """Clip activations generated by GELU to maxval when quantized."""
    for name, mod in model.named_modules():
        if name.endswith('.output.dense') and not name.endswith('attention.output.dense'):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''')
def expand_amax( model ):
    """Expand per-tensor amax to be per channel, where each channel is assigned the max of the tensor."""
    for name, mod in model.named_modules():
        if hasattr(mod, '_weight_quantizer') and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''')
def recalibrate_weights( model ):
    """Performs max calibration on the weights and updates amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, '_weight_quantizer'):
            if not hasattr(mod._weight_quantizer, '_amax'):
                print(f'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''')
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''')
            mod._weight_quantizer._amax = amax
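# --- Added example (assumption): the reduction done by reduce_amax above, shown
# with plain torch. For an (out, in, kh, kw) weight quantized per axis 0, amax is
# the max of |w| over the remaining axes (1, 2, 3).
def _example_reduce_amax_equivalent():  # pragma: no cover
    w = torch.randn(8, 4, 3, 3)
    amax = w.abs().amax(dim=(1, 2, 3), keepdim=True)
    print(amax.shape)  # torch.Size([8, 1, 1, 1])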
def print_model_summary( model, name_width=25, line_width=180, ignore=None ):
    """Print model quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, 'weight'):
            continue
        name_width = max(name_width, len(name))
    for name, mod in model.named_modules():
        input_q = getattr(mod, '_input_quantizer', None)
        weight_q = getattr(mod, '_weight_quantizer', None)
        if not hasattr(mod, 'weight'):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f'''Act:{input_q.extra_repr()}'''
        wgt_str = f'''Wgt:{weight_q.extra_repr()}'''
        s = f'''{name:{name_width}} {act_str} {wgt_str}'''
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f'''{name:{name_width}} {act_str}''')
            logger.info(f'''{" ":{name_width}} {wgt_str}''')
def print_quant_summary( model ):
    """Print summary of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f'''{name:80} {mod}''')
            count += 1
    print(f'''{count} TensorQuantizers found in model''')
def set_quantizer( name, mod, quantizer, k, v ):
    """Set attributes for mod.quantizer."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f'''{name} has no {quantizer}''')
def set_quantizers( name, mod, which="both", **kwargs ):
    """Set quantizer attributes for mod."""
    s = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
    for k, v in kwargs.items():
        s += f''' {k}={v}'''
        if which in ["input", "both"]:
            set_quantizer(name, mod, '_input_quantizer', k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, '_weight_quantizer', k, v)
    logger.info(s)
def set_quantizer_by_name( model, names, **kwargs ):
    """Set quantizer attributes for layers where the name contains a substring in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, '_input_quantizer') or hasattr(mod, '_weight_quantizer'):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith('_quantizer'):
            for n in names:
                if re.search(n, name):
                    s = f'''Warning: changing {name:{name_width}}'''
                    for k, v in kwargs.items():
                        s += f''' {k}={v}'''
                        setattr(mod, k, v)
                    logger.info(s)
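# --- Added example (hypothetical wiring, not in the original file): how a training
# script consumes this module before building the model.
def _example_wire_into_training_script():  # pragma: no cover
    import argparse

    parser = argparse.ArgumentParser()
    add_arguments(parser)
    args = parser.parse_args(['--aprec', '8', '--wprec', '8', '--calibrator', 'percentile', '--percentile', '99.99'])
    set_default_quantizers(args)  # must run before any QuantLinear is instantiated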
| 47 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 0 |
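# --- Added note (assumption): what the lazy structure above buys you. The heavy
# `modeling_focalnet` module (and its torch import) is only loaded when a model
# attribute is actually accessed:
#
#   from transformers import FocalNetConfig      # cheap, config module only
#   from transformers import FocalNetModel       # first touch triggers the torch import
#   model = FocalNetModel(FocalNetConfig())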
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest( unittest.TestCase ):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    @property
    def image_processor_dict( self : Optional[Any] ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp( self : int ):
        """simple docstring"""
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer( self : Optional[Any], **kwargs : Optional[int] ):
        """simple docstring"""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor( self : Optional[Any], **kwargs : Optional[int] ):
        """simple docstring"""
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown( self : Union[str, Any] ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs( self : str ):
        """simple docstring"""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default( self : Dict ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features( self : List[str] ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor( self : List[Any] ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2)
    def test_tokenizer( self : Dict ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor( self : Union[str, Any] ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_char_decode( self : str ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decode_strs = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decoded_tok = [seq.replace(" ", "") for seq in decoded_tok]
        self.assertListEqual(decode_strs, decoded_tok)
    def test_model_input_names( self : str ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
    def test_processor_batch_decode( self : int ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
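# --- Added example (hypothetical, not part of the test file): end-to-end use of the
# processor under test. The checkpoint name "alibaba-damo/mgp-str-base" is an
# assumption; `batch_decode` on the model logits is grounded in the test above.
def _example_mgpstr_inference():  # pragma: no cover
    import numpy as np
    import torch
    from PIL import Image
    from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

    processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
    image = Image.fromarray(np.zeros((32, 128, 3), dtype=np.uint8))  # stand-in for a cropped text line
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    print(processor.batch_decode(outputs.logits)["generated_text"])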
| 48 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config( model_name ):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=['stage2', 'stage3', 'stage4'], )
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )
    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys( config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key( dct, old, new ):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v( state_dict, backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''')
            in_proj_bias = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
            # fmt: on
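# --- Added example (assumption): the slicing above in miniature. A fused
# (3*dim, dim) qkv projection is cut into equal query/key/value blocks along the
# first axis.
def _example_split_fused_qkv():  # pragma: no cover
    dim = 4
    in_proj_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q_w = in_proj_weight[:dim, :]
    k_w = in_proj_weight[dim : dim * 2, :]
    v_w = in_proj_weight[-dim:, :]
    assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)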
def read_in_decoder_q_k_v( state_dict, config ):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint( model_name, pytorch_dump_folder_path, push_to_hub ):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints', filename='adet_swin_ft.pth')
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365', filename='deta_swin_pt_o365.pth')
    else:
        raise ValueError(f'''Model name {model_name} not supported''')
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace('transformer.decoder', 'model.decoder')] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict['model.' + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace('transformer', 'model')] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format='coco_detection')
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device))
    # verify logits
    print('Logits:', outputs.logits[0, :3, :3])
    print('Boxes:', outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1E-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1E-4)
    print('Everything ok!')
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print('Pushing model and processor to hub...')
        model.push_to_hub(f'''jozhang97/{model_name}''')
        processor.push_to_hub(f'''jozhang97/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 686 | 0 |
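# --- Added usage note (assumption, including the script filename): a typical
# invocation of the conversion entry point above.
#
#   python convert_deta_swin_to_pytorch.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large \
#       --push_to_hub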
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks( args ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings( student, args ):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings( student, args ):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description='''Training''')
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
    parser.add_argument(
        '''--dump_path''', type=str, required=True, help='''The output directory (log, checkpoints, parameters, etc.)''')
    parser.add_argument(
        '''--data_file''', type=str, required=True, help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''', )
    parser.add_argument(
        '''--student_type''', type=str, choices=['''distilbert''', '''roberta''', '''gpt2'''], required=True, help='''The student type (DistilBERT, RoBERTa).''', )
    parser.add_argument('''--student_config''', type=str, required=True, help='''Path to the student configuration.''')
    parser.add_argument(
        '''--student_pretrained_weights''', default=None, type=str, help='''Load student initialization checkpoint.''')
    parser.add_argument(
        '''--teacher_type''', choices=['''bert''', '''roberta''', '''gpt2'''], required=True, help='''Teacher type (BERT, RoBERTa).''')
    parser.add_argument('''--teacher_name''', type=str, required=True, help='''The teacher model.''')
    parser.add_argument('''--temperature''', default=2.0, type=float, help='''Temperature for the softmax temperature.''')
    parser.add_argument(
        '''--alpha_ce''', default=0.5, type=float, help='''Linear weight for the distillation loss. Must be >=0.''')
    parser.add_argument(
        '''--alpha_mlm''', default=0.0, type=float, help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''', )
    parser.add_argument('''--alpha_clm''', default=0.5, type=float, help='''Linear weight for the CLM loss. Must be >=0.''')
    parser.add_argument('''--alpha_mse''', default=0.0, type=float, help='''Linear weight of the MSE loss. Must be >=0.''')
    parser.add_argument(
        '''--alpha_cos''', default=0.0, type=float, help='''Linear weight of the cosine embedding loss. Must be >=0.''')
    parser.add_argument(
        '''--mlm''', action='''store_true''', help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''')
    parser.add_argument(
        '''--mlm_mask_prop''', default=0.15, type=float, help='''Proportion of tokens for which we need to make a prediction.''', )
    parser.add_argument('''--word_mask''', default=0.8, type=float, help='''Proportion of tokens to mask out.''')
    parser.add_argument('''--word_keep''', default=0.1, type=float, help='''Proportion of tokens to keep.''')
    parser.add_argument('''--word_rand''', default=0.1, type=float, help='''Proportion of tokens to randomly replace.''')
    parser.add_argument(
        '''--mlm_smoothing''', default=0.7, type=float, help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''', )
    parser.add_argument('''--token_counts''', type=str, help='''The token counts in the data_file for MLM.''')
    parser.add_argument(
        '''--restrict_ce_to_mask''', action='''store_true''', help='''If true, compute the distillation loss only the [MLM] prediction distribution.''', )
    parser.add_argument(
        '''--freeze_pos_embs''', action='''store_true''', help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''', )
    parser.add_argument(
        '''--freeze_token_type_embds''', action='''store_true''', help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''', )
    parser.add_argument('''--n_epoch''', type=int, default=3, help='''Number of pass on the whole dataset.''')
    parser.add_argument('''--batch_size''', type=int, default=5, help='''Batch size (for each process).''')
    parser.add_argument(
        '''--group_by_size''', action='''store_false''', help='''If true, group sequences that have similar length into the same batch. Default is true.''', )
    parser.add_argument(
        '''--gradient_accumulation_steps''', type=int, default=50, help='''Gradient accumulation for larger training batches.''', )
    parser.add_argument('''--warmup_prop''', default=0.05, type=float, help='''Linear warmup proportion.''')
    parser.add_argument('''--weight_decay''', default=0.0, type=float, help='''Weight decay if we apply some.''')
    parser.add_argument('''--learning_rate''', default=5E-4, type=float, help='''The initial learning rate for Adam.''')
    parser.add_argument('''--adam_epsilon''', default=1E-6, type=float, help='''Epsilon for Adam optimizer.''')
    parser.add_argument('''--max_grad_norm''', default=5.0, type=float, help='''Max gradient norm.''')
    parser.add_argument('''--initializer_range''', default=0.02, type=float, help='''Random initialization range.''')
    parser.add_argument(
        '''--fp16''', action='''store_true''', help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''', )
    parser.add_argument(
        '''--fp16_opt_level''', type=str, default='''O1''', help=(
            '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
            '''See details at https://nvidia.github.io/apex/amp.html'''
        ), )
    parser.add_argument('''--n_gpu''', type=int, default=1, help='''Number of GPUs in the node.''')
    parser.add_argument('''--local_rank''', type=int, default=-1, help='''Distributed training - Local rank''')
    parser.add_argument('''--seed''', type=int, default=56, help='''Random seed''')
    parser.add_argument('''--log_interval''', type=int, default=500, help='''Tensorboard logging interval.''')
    parser.add_argument('''--checkpoint_interval''', type=int, default=4_000, help='''Checkpoint interval.''')
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
            json.dump(vars(args), f, indent=4)
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f'''Special tokens {special_tok_ids}''')
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
    logger.info(F'''Loading data from {args.data_file}''')
    with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
    if args.mlm:
        logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''')
        with open(args.token_counts, '''rb''') as fp:
            counts = pickle.load(fp)
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info('''Data loader created.''' )
# STUDENT #
    logger.info(F'''Loading student config from {args.student_config}''')
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''')
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
    if args.n_gpu > 0:
        student.to(F'''cuda:{args.local_rank}''')
    logger.info('''Student loaded.''')
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(F'''cuda:{args.local_rank}''')
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher)
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
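# --- Added example (assumption): the MLM masking distribution built in main() in
# miniature. Raising counts to a negative power (here the default smoothing of
# 0.7) makes rare tokens proportionally more likely to be masked.
def _example_mlm_smoothing():  # pragma: no cover
    import numpy as np

    counts = np.array([1000, 100, 10, 1])
    token_probs = np.maximum(counts, 1) ** -0.7
    print(token_probs / token_probs.sum())  # the rarest token gets the largest share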
| 49 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
class PerceiverTokenizationTest( TokenizerTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp( self : Tuple ):
        '''simple docstring'''
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def perceiver_tokenizer( self : str ):
        '''simple docstring'''
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')
    def get_tokenizer( self : Optional[int], **kwargs : Dict ):
        '''simple docstring'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence( self : Optional[int], tokenizer : Tuple, with_prefix_space : Optional[Any]=False, max_length : int=20, min_length : Optional[int]=5 ):
        '''simple docstring'''
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(R'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char( self : Union[str, Any] ):
        '''simple docstring'''
        tokenizer = self.perceiver_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]Unicode €.[SEP]')
        encoded = tokenizer('e è é ê ë')
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]e è é ê ë[SEP]')
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), '[CLS]e è é ê ë[SEP]')
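    # --- Added example (assumption about the tokenizer internals): the ids asserted
    # above, reconstructed by hand. Perceiver tokenizes raw UTF-8 bytes, offset by 6
    # special tokens, with [CLS]=4 and [SEP]=5.
    def _example_byte_level_ids(self):  # pragma: no cover
        text = 'Unicode €.'
        ids = [4] + [b + 6 for b in text.encode('utf-8')] + [5]
        print(ids)  # [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]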
    def test_prepare_batch_integration( self ):
        '''simple docstring'''
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0] )
        else:
            result = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 38) , batch.input_ids.shape )
        self.assertEqual((2, 38) , batch.attention_mask.shape )
    def test_empty_target_text( self ):
        '''simple docstring'''
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids' , batch )
        self.assertIn('attention_mask' , batch )
        self.assertNotIn('decoder_input_ids' , batch )
        self.assertNotIn('decoder_attention_mask' , batch )
    def test_max_length_integration( self ):
        '''simple docstring'''
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text , max_length=32 , padding='max_length' , truncation=True , return_tensors=FRAMEWORK )
        self.assertEqual(32 , targets['input_ids'].shape[1] )
    def test_save_and_load_tokenizer( self ):
        '''simple docstring'''
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                shutil.rmtree(tmpdirname )
        tokenizers = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'] )
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token' )
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(tmpdirname )
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens( self ):
        '''simple docstring'''
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
                    special_tokens_map = json.load(json_file )
                with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
                    tokenizer_config = json.load(json_file )
                added_tokens_extra_ids = [f'''<extra_id_{i}>''' for i in range(125 )]
                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(special_tokens_map , outfile )
                with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(tokenizer_config , outfile )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir , )
                self.assertIn(
                    'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=True )]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir , additional_special_tokens=new_added_tokens , )
                self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
    def test_decode_invalid_byte_id( self ):
        '''simple docstring'''
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178] ) , '�' )
    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists( self ):
        '''simple docstring'''
        pass
    # tokenizer does not have vocabulary
    def test_get_vocab( self ):
        '''simple docstring'''
        pass
    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs( self ):
        '''simple docstring'''
        pass
    # tests all ids in vocab => vocab doesn't exist, so there is nothing to test
    def test_conversion_reversible( self ):
        '''simple docstring'''
        pass
    def test_convert_tokens_to_string_format( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(fast=True , do_lower_case=True )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                string = tokenizer.convert_tokens_to_string(tokens )
                self.assertIsInstance(string , str )
| 686 | 0 |
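# The expected ids in test_multibytes_char follow Perceiver's byte-level scheme:
# each UTF-8 byte is shifted by the number of special tokens (6 here, so 'U' =
# byte 85 becomes id 91), with [CLS]=4 / [SEP]=5 wrapped around the sequence.
# A minimal standalone sketch of that mapping, derived from the expected ids
# above — an illustration, not the library implementation:
NUM_SPECIAL = 6
CLS_ID, SEP_ID = 4, 5
def toy_encode(text: str) -> list:
    return [CLS_ID] + [b + NUM_SPECIAL for b in text.encode('utf-8')] + [SEP_ID]
def toy_decode(ids: list) -> str:
    # drop the special ids, un-shift the rest back to raw bytes
    return bytes(i - NUM_SPECIAL for i in ids if i >= NUM_SPECIAL).decode('utf-8', errors='replace')
assert toy_encode('Unicode €.') == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
assert toy_decode(toy_encode('Unicode €.')) == 'Unicode €.'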
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase ):
    '''simple docstring'''
    def one_complete_example( self ,complete_file_name ,parser_only ,secondary_filename=None ,special_strings=None ):
        self.maxDiff = None  # show full diffs on assertion failure
        by_feature_path = os.path.abspath(os.path.join("""examples""" ,"""by_feature""" ) )
        examples_path = os.path.abspath("""examples""" )
        for item in os.listdir(by_feature_path ):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path ,item )
                if os.path.isfile(item_path ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name ,feature_script=item ,tested_section="""main()""" if parser_only else """training_function()""" ,):
                        diff = compare_against_test(
                            os.path.join(examples_path ,complete_file_name ) ,item_path ,secondary_filename ,special_strings )
                        diff = """\n""".join(diff )
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string ,"""""" )
                        self.assertEqual(diff ,"""""" )
    def test_nlp_examples( self ):
        self.one_complete_example("""complete_nlp_example.py""" ,True )
        self.one_complete_example("""complete_nlp_example.py""" ,False )
    def test_cv_examples( self ):
        cv_path = os.path.abspath(os.path.join("""examples""" ,"""cv_example.py""" ) )
        special_strings = [
            """ """ * 16 + """{\n\n""",
            """ """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
            """ """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
            """ """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
            """ """ * 20 + """\"epoch\": epoch,\n\n""",
            """ """ * 16 + """},\n\n""",
            """ """ * 16 + """step=epoch,\n""",
            """ """ * 12,
            """ """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
        ]
        self.one_complete_example("""complete_cv_example.py""" ,True ,cv_path ,special_strings )
        self.one_complete_example("""complete_cv_example.py""" ,False ,cv_path ,special_strings )
@mock.patch.dict(os.environ ,{'TESTING_MOCKED_DATALOADERS': '1'} )
class FeatureExamplesTests(TempDirTestCase ):
    '''simple docstring'''
    clear_on_setup = False
    @classmethod
    def setUpClass( cls ):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir ,"""default_config.yml""" )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
    @classmethod
    def tearDownClass( cls ):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )
    def test_checkpointing_by_epoch( self ):
        testargs = F'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        '''.split()
        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir ,"""epoch_0""" ) ) )
    def test_checkpointing_by_steps( self ):
        testargs = F'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        '''.split()
        _ = run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir ,"""step_2""" ) ) )
    def test_load_states_by_epoch( self ):
        testargs = F'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir ,"epoch_0" )}
        '''.split()
        output = run_command(self._launch_args + testargs ,return_stdout=True )
        self.assertNotIn("""epoch 0:""" ,output )
        self.assertIn("""epoch 1:""" ,output )
    def test_load_states_by_steps( self ):
        testargs = F'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir ,"step_2" )}
        '''.split()
        output = run_command(self._launch_args + testargs ,return_stdout=True )
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("""epoch 0:""" ,output )
            self.assertIn("""epoch 1:""" ,output )
        else:
            self.assertIn("""epoch 0:""" ,output )
            self.assertIn("""epoch 1:""" ,output )
    @slow
    def test_cross_validation( self ):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ ,{"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
            output = run_command(self._launch_args + testargs ,return_stdout=True )
            results = re.findall("""({.+})""" ,output )
            results = [r for r in results if """accuracy""" in r][-1]
            results = ast.literal_eval(results )
            self.assertGreaterEqual(results["""accuracy"""] ,0.75 )
    def test_multi_process_metrics( self ):
        testargs = ["""examples/by_feature/multi_process_metrics.py"""]
        run_command(self._launch_args + testargs )
    @require_trackers
    @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
    def test_tracking( self ):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = F'''
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            '''.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(tmpdir ,"""tracking""" ) ) )
    def test_gradient_accumulation( self ):
        testargs = ["""examples/by_feature/gradient_accumulation.py"""]
        run_command(self._launch_args + testargs )
    def test_local_sgd( self ):
        testargs = ["""examples/by_feature/local_sgd.py"""]
        run_command(self._launch_args + testargs )
| 50 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes , num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost] )
        adjacency[nodeb].append([nodea, cost] )
    result = mst(adjacency )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
        assert edge in result or reverse in result
| 686 | 0 |
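# For reference, a lazy heap-based Prim's algorithm over the same
# adjacency-list shape. The imported `prisms_algorithm` may differ in
# signature and return type, so treat this as an illustrative stand-in. On
# the graph above it yields a spanning tree of total weight 37 (a tie
# between the two weight-8 edges means the edge set can legitimately differ
# from `expected` in orientation or choice).
import heapq
from collections import defaultdict
def prim_mst(adjacency: dict, start: int = 0) -> set:
    visited = {start}
    heap = [(cost, start, v) for v, cost in adjacency[start]]  # edges leaving the tree
    heapq.heapify(heap)
    mst_edges = set()
    while heap and len(visited) < len(adjacency):
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue  # stale entry: v was already reached via a cheaper edge
        visited.add(v)
        mst_edges.add((u, v))
        for w, c in adjacency[v]:
            if w not in visited:
                heapq.heappush(heap, (c, v, w))
    return mst_edges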
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}
def extract_user_profile( script ) -> dict:
    """simple docstring"""
    data = script.contents[0]
    info = json.loads(data[data.find('''{"config"''' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    '''simple docstring'''
    def __init__( self , username: str ):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()
    def get_json( self ) -> dict:
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , '''html.parser''' ).find_all('''script''' )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
def __repr__( self : Tuple ):
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : Optional[int] ):
return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username( self ) -> str:
        return self.user_data["username"]
    @property
    def fullname( self ) -> str:
        return self.user_data["full_name"]
    @property
    def biography( self ) -> str:
        return self.user_data["biography"]
    @property
    def email( self ) -> str:
        return self.user_data["business_email"]
    @property
    def website( self ) -> str:
        return self.user_data["external_url"]
    @property
    def number_of_followers( self ) -> int:
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self ) -> int:
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self ) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self ) -> str:
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self ) -> bool:
        return self.user_data["is_verified"]
    @property
    def is_private( self ) -> bool:
        return self.user_data["is_private"]
def test_instagram_user( username: str = "github" ) -> None:
    """simple docstring"""
    import os
    if os.environ.get('''CI''' ):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , dict )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('github')
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 51 |
from ..utils import DummyObject, requires_backends
class MidiProcessor( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["transformers", "torch", "note_seq"]
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['transformers', 'torch', 'note_seq'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 686 | 0 |
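# The class above is an instance of the standard dummy-object pattern: a
# placeholder that only raises a helpful ImportError when used, so importing
# the package never fails just because an optional backend is missing. A
# dependency-free sketch of the same idea (the real DummyObject and
# requires_backends live in the library's utils and check importability
# instead of raising unconditionally):
class _DummyObject(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")
def _requires_backends(obj, backends):
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    raise ImportError(f"{name} requires the backends {backends}")
class _MidiProcessorStub(metaclass=_DummyObject):
    _backends = ["transformers", "torch", "note_seq"]
    def __init__(self, *args, **kwargs):
        _requires_backends(self, self._backends)
try:
    _MidiProcessorStub()
except ImportError as err:
    print(err)  # _MidiProcessorStub requires the backends ['transformers', 'torch', 'note_seq']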
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings( idx ):
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
))
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
))
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
))
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
))
return embed
def attention( idx , cnt ):
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
))
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
))
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight"""))
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias"""))
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight"""))
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias"""))
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight"""))
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias"""))
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight"""))
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias"""))
return attention_weights
def cls_token( idx ):
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token'''))
return token
def final():
    head = []
head.append(('''layernorm.weight''', '''norm.weight'''))
head.append(('''layernorm.bias''', '''norm.bias'''))
head.append(('''classifier.weight''', '''head.weight'''))
head.append(('''classifier.bias''', '''head.bias'''))
return head
def convert_cvt_checkpoint( cvt_model , image_size , cvt_file_name , pytorch_dump_folder ):
    img_labels_file = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='''dataset''')) , '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('''/''' , 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('''/''' , 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''')
    image_processor.size['''shortest_edge'''] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('''cpu'''))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 52 |
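# Every (new_key, old_key) pair built by embeddings()/attention()/cls_token()/
# final() above feeds a plain rename pass over the checkpoint. The core move,
# independent of CvT, is a single loop; the toy keys below are illustrative:
from collections import OrderedDict
def rename_state_dict(original: dict, key_pairs: list) -> OrderedDict:
    renamed = OrderedDict()
    for new_key, old_key in key_pairs:
        renamed[new_key] = original[old_key]
    return renamed
_old = {"stage0.patch_embed.proj.weight": [1, 2, 3]}
_pairs = [("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
           "stage0.patch_embed.proj.weight")]
print(rename_state_dict(_old, _pairs))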
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout() -> None:
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
            requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error() -> None:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def test_offline_with_datasets_offline_mode() -> None:
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head('https://huggingface.co' )
| 686 | 0 |
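# The offline() helper patches networking so each failure mode above can be
# produced deterministically. A minimal version of the CONNECTION_FAILS mode
# using unittest.mock (the real datasets implementation differs; this only
# shows the mechanism):
from contextlib import contextmanager
from unittest.mock import patch
import requests
@contextmanager
def offline_connection_fails():
    def _raise(*args, **kwargs):
        raise requests.ConnectionError('Offline mode is enabled.')
    # requests.get / requests.request all funnel through Session.request
    with patch('requests.Session.request', _raise):
        yield
with offline_connection_fails():
    try:
        requests.get('https://huggingface.co')
    except requests.ConnectionError as err:
        print(err)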
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , query_images=None , images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.' )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            else:
                raise ValueError('Target return tensor type could not be returned' )
            encoding = BatchEncoding()
            encoding['input_ids'] = input_ids
            encoding['attention_mask'] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding['query_pixel_values'] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs ):
        return self.image_processor.post_process(*args , **kwargs )
    def post_process_object_detection( self , *args , **kwargs ):
        return self.image_processor.post_process_object_detection(*args , **kwargs )
    def post_process_image_guided_detection( self , *args , **kwargs ):
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
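# The nested-list branch of __call__ pads every sample's query list to the
# longest one in the batch before tokenizing, so the per-sample tensors can
# be concatenated on axis 0. The padding step in isolation:
text = [["cat", "remote control"], ["dog"]]
max_num_queries = max(len(t) for t in text)
padded = [t + [" "] * (max_num_queries - len(t)) for t in text]
print(padded)  # [['cat', 'remote control'], ['dog', ' ']]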
| 53 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Rouge( datasets.Metric ):
    '''simple docstring'''
    def _info( self ):
        '''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute( self , predictions , references , rouge_types=None , use_aggregator=True , use_stemmer=False ):
        '''simple docstring'''
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer )
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references , predictions ):
            score = scorer.score(ref , pred )
            if use_aggregator:
                aggregator.add_scores(score )
            else:
                scores.append(score )
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
| 686 | 0 |
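# The metric above is a thin wrapper over Google's rouge_score package; the
# same numbers can be computed without the datasets layer (requires
# `pip install rouge-score`):
from rouge_score import rouge_scorer
scorer = rouge_scorer.RougeScorer(['rouge1', 'rougeL'], use_stemmer=True)
score = scorer.score('hello there general kenobi', 'hello there')
print(score['rouge1'])  # Score(precision=..., recall=..., fmeasure=...) per rouge type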
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest( unittest.TestCase ):
    def setUp( self ) -> None:
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_image_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with self.assertRaises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
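# Every test above exercises one invariant: a processor is a thin facade
# that routes text to the tokenizer and images to the image processor, then
# merges the outputs. A dependency-free sketch of that composition (the stub
# components are illustrative):
class ToyProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor
    def __call__(self, text=None, images=None):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images.')
        out = {}
        if text is not None:
            out.update(self.tokenizer(text))
        if images is not None:
            out.update(self.image_processor(images))
        return out
_tok = lambda s: {'input_ids': [ord(c) for c in s]}
_imp = lambda ims: {'pixel_values': [[0.0] * 4 for _ in ims]}
print(ToyProcessor(_tok, _imp)('hi', images=[object()]))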
| 54 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class Swin2SRConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , image_size=64 , patch_size=1 , num_channels=3 , embed_dim=180 , depths=[6, 6, 6, 6, 6, 6] , num_heads=[6, 6, 6, 6, 6, 6] , window_size=8 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , upscale=2 , img_range=1.0 , resi_connection="1conv" , upsampler="pixelshuffle" , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 686 | 0 |
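# attribute_map lets generic code read config.hidden_size even though this
# model stores the value as embed_dim. The aliasing can be sketched in a few
# lines (PretrainedConfig implements it more thoroughly, including writes;
# this simplified version only handles reads):
class AliasedConfig:
    attribute_map = {'hidden_size': 'embed_dim', 'num_attention_heads': 'num_heads'}
    def __init__(self, embed_dim=180, num_heads=(6, 6, 6, 6, 6, 6)):
        self.embed_dim = embed_dim
        self.num_heads = num_heads
    def __getattr__(self, name):
        # only reached when normal attribute lookup fails, i.e. for aliases
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)
print(AliasedConfig().hidden_size)  # 180, read through the alias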
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs )
        return config
    def test_step_shape( self ):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config )
        scheduler.set_timesteps(num_inference_steps )
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual , timestep_0 , sample ).prev_sample
        output_1 = scheduler.step(residual , timestep_1 , sample ).prev_sample
        self.assertEqual(output_0.shape , sample.shape )
        self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_clip_denoised( self ):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised )
    def test_full_loop_no_noise_onestep( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps )
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps ):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )
            # 2. predict noise residual
            residual = model(scaled_sample , t )
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 192.7614 ) < 1E-2
        assert abs(result_mean.item() - 0.2510 ) < 1E-3
    def test_full_loop_with_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )
            # 2. predict noise residual
            residual = model(scaled_sample , t )
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 347.6357 ) < 1E-2
        assert abs(result_mean.item() - 0.4527 ) < 1E-3
    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError , msg="`timesteps` must be in descending order." ):
            scheduler.set_timesteps(timesteps=timesteps )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" ,):
            scheduler.set_timesteps(timesteps=timesteps )
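# sigma_min / sigma_max in the config above bound a Karras-style noise
# schedule, discretized as sigma_i = (s_max^(1/rho) + i/(N-1) *
# (s_min^(1/rho) - s_max^(1/rho)))^rho with rho = 7. A standalone
# computation (illustrative; the scheduler's exact discretization may differ):
def karras_sigmas(n: int, sigma_min: float = 0.002, sigma_max: float = 80.0, rho: float = 7.0) -> list:
    max_inv = sigma_max ** (1 / rho)
    min_inv = sigma_min ** (1 / rho)
    return [(max_inv + i / (n - 1) * (min_inv - max_inv)) ** rho for i in range(n)]
_sigmas = karras_sigmas(10)
print(round(_sigmas[0], 3), round(_sigmas[-1], 3))  # 80.0 0.002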
| 55 |
import random
def partition( a: list , left_index: int , right_index: int ) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[j] , a[i] = a[i] , a[j]
            i += 1
    a[left_index] , a[i - 1] = a[i - 1] , a[left_index]
    return i - 1
def quick_sort_random( a: list , left: int , right: int ) -> None:
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot] , a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point
def main() -> None:
    user_input = input('Enter numbers separated by a comma:\n' ).strip()
    arr = [int(item ) for item in user_input.split(',' )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )
if __name__ == "__main__":
    main()
| 686 | 0 |
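# Quick sanity check for the randomized quicksort above; note that
# quick_sort_random sorts in place and the right bound is exclusive:
random.seed(0)  # make the pivot choices reproducible for the demo
_data = [3, 1, 4, 1, 5, 9, 2, 6]
quick_sort_random(_data, 0, len(_data))
assert _data == [1, 1, 2, 3, 4, 5, 6, 9]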
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 56 |
import math
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1 ) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 686 | 0 |
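# The loop encodes the number-spiral layout (Project Euler 58): the ring of
# side length j+2 has diagonal corners (j+2)^2 - k*(j+1) for k = 0..3, and
# range(j*j + j + 1, (j+2)*(j+2), j+1) walks the three non-square corners
# (the fourth, (j+2)^2, is an odd square and never prime). First pass, j = 3:
corners = list(range(3 * 3 + 3 + 1, 5 * 5, 3 + 1))
print(corners)  # [13, 17, 21] -> two primes
primes = 3 + sum(map(is_prime, corners))  # 3, 5, 7 were counted at the start -> 5
print(primes / (2 * 5 - 1))  # 5 of the 9 diagonal numbers are prime (~0.556)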
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser ) -> None:
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ) -> None:
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 57 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class ResNetConfig( BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1_024, 2_048] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class ResNetOnnxConfig(OnnxConfig):
    """ONNX export configuration for ResNet."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
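

# Illustrative usage sketch (not part of the original module): building a small
# "basic"-block config and the matching ONNX export config defined above.
#
#     config = ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2],
#                           hidden_sizes=[64, 128, 256, 512])
#     onnx_config = ResNetOnnxConfig(config)
#     assert "pixel_values" in onnx_config.inputs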
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
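
    # Each parameterized case below is (seed, timestep, expected_output_slice).
    # Per the in-test comments, the expected slices encode reference values from
    # an equivalent PyTorch float16 run, so the assertions check Flax/bfloat16
    # numerical parity against PyTorch rather than exact reproducibility.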
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1_024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict) -> None:
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info) -> None:
    # For backward compatibility, `asdict` must keep the deprecated "dataset_name" field
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if ".fc2." in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if ".fc1." in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
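

# Illustrative key mapping (derived from the replacements above): with
# expert_idx=7, a fairseq key such as
#   "decoder.layers.3.moe_layer.experts.0.fc1.weight"
# becomes
#   "decoder.layers.3.ffn.experts.expert_7.fc1.weight"
# (the fc1 rewrite is skipped here because the key contains "experts").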
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
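

# The index built above follows the standard sharded-checkpoint layout used by
# `transformers`: {"metadata": {"total_size": ...}, "weight_map": {param: shard_file}},
# written next to the shards as the weights index JSON.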
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
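

# Usage sketch: the test mixins below instantiate this tester in setUp and call
#   config, inputs_dict = tester.prepare_config_and_inputs_for_common()
# to obtain the (config, inputs) pair shared across the common model tests.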
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute the GLUE evaluation metric associated with each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each prediction should be a label id (or a float score for STS-B).
    references: list of references, one per prediction.
        Each reference should be a label id (or a float score for STS-B).
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
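

# Note: `preds` and `labels` above are expected to be array-likes that support
# elementwise comparison and `.mean()` (e.g. numpy arrays); this matches the
# `format='numpy'` the metric declares below.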
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12_000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12_000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."

        # We test on the dev set to compare to benchmarks without submitting to the GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
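
    # Note on _eval_end above: it averages `val_loss` over batches, collapses
    # logits to label ids (classification) or squeezes scores (regression),
    # and merges the task metrics from `compute_metrics` into one log dict.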
    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--task", default="", type=str, required=True, help="The GLUE task to run")
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none"
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """A conv block that bundles conv/norm/activation layers: 2D convolution, batch norm, ReLU."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM), as used in PSPNet: pools the input at several
    scales, projects each pooled map, and upsamples everything back to the input size."""

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
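

# Shape sketch for the module above: given features of shape (batch, C_in, H, W)
# and pool_scales like (1, 2, 3, 6), each block pools to (batch, C_in, s, s),
# projects to `channels` with a 1x1 conv block, and upsamples back to
# (batch, channels, H, W), so all scales can be concatenated along dim 1.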
class UperNetHead(nn.Module):
    """Decode head implementing Unified Perceptual Parsing (UPerNet, https://arxiv.org/abs/1807.10221)."""

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
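

# Data flow in UperNetHead.forward, for intuition: the PSP output becomes the
# coarsest FPN level, 1x1 lateral convs align channel counts for the lower
# levels, higher levels are upsampled and added top-down, and finally all
# levels are upsampled to the finest resolution and concatenated before the
# fpn_bottleneck conv and the 1x1 classifier.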
class UperNetFCNHead(nn.Module):
    """Auxiliary fully-convolutional head (FCN, https://arxiv.org/abs/1411.4038) applied to one backbone feature map."""

    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    """Handles weights initialization and provides a simple interface for loading pretrained models."""

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"""
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UPERNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
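# Editor's note: a hedged, standalone sketch of the global-attention-mask trick used in
# `prepare_config_and_inputs_for_common` above — zeros everywhere, a one on the final
# token. The `_demo_*` names are illustrative and not part of the test suite.
if is_tf_available():
    _demo_mask = tf.ones((2, 6), dtype=tf.int32)  # batch of 2, sequence length 6
    _demo_global = tf.concat(
        [tf.zeros_like(_demo_mask)[:, :-1], tf.ones_like(_demo_mask)[:, -1:]], axis=-1
    )
    assert _demo_global.numpy().tolist() == [[0, 0, 0, 0, 0, 1]] * 2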
@require_tf
class TFLEDModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFLEDModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LEDConfig )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_attention_outputs( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict['global_attention_mask'] = tf.zeros_like(inputs_dict['attention_mask'] )
        num_global_attn_indices = 2
        inputs_dict['global_attention_mask'] = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(outputs ):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
        def check_encoder_attentions_output(outputs ):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(global_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['use_cache'] = False
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def A ( self : List[Any] ):
'''simple docstring'''
pass
def A ( self : Any ):
'''simple docstring'''
pass
def _long_tensor(tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
_lowerCamelCase : List[Any] = 1E-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3 )
    def test_inference_with_head( self ):
        '''simple docstring'''
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3 , rtol=1E-3 ) | 686 | 0 |
def decimal_isolate(number: float , digit_amount: int ) -> float:
    """Isolate the decimal part of `number`, optionally rounded to `digit_amount` digits."""
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
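# Editor's note: hedged, standalone cross-check — the standard library's math.modf
# performs the same fractional/integer split that decimal_isolate implements.
import math
_frac, _integral = math.modf(35.345)
assert _integral == 35.0
assert abs(_frac - (35.345 - int(35.345))) < 1e-12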
| 63 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]
def custom_name_func(func , param_num , param ):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x ) for x in param.args ) )
    return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
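# Editor's note: hedged, standalone sketch of the `name_func` hook consumed by
# `parameterized.expand`: it receives (function, param number, param) and returns the
# sub-test name. The fake objects below are illustrative only and assume the
# restored `custom_name_func` above.
class _FakeFunc:
    __name__ = 'test_fp16'
class _FakeParam:
    args = ('zero2', 'base')
assert custom_name_func(_FakeFunc, 0, _FakeParam) == 'test_fp16_zero2_base'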
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2 ( TestCasePlus ):
'''simple docstring'''
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_non_distributed( self , stage , model ):
        '''simple docstring'''
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=False , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_distributed( self , stage , model ):
        '''simple docstring'''
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=False , )
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_non_distributed( self , stage , model ):
        '''simple docstring'''
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=True , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_distributed( self , stage , model ):
        '''simple docstring'''
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=True , )
    def do_checks( self , output_dir ):
        '''simple docstring'''
        pass
    def run_and_check( self , stage: str , model: str , eval_steps: int = 10 , distributed: bool = True , fp16: bool = True , quality_checks: bool = True , ):
        '''simple docstring'''
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage , model_name=model_name , eval_steps=eval_steps , num_train_epochs=1 , distributed=distributed , fp16=fp16 , )
        self.do_checks(output_dir )
        return output_dir
    def run_trainer( self , stage: str , model_name: str , eval_steps: int = 10 , num_train_epochs: int = 1 , distributed: bool = True , fp16: bool = True , ):
        '''simple docstring'''
        output_dir = self.get_auto_remove_tmp_dir('./xxx' , after=False )
        args = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
'''.split()
        if fp16:
            args.extend(['--fp16'] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
        script = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
        launcher = self.get_launcher(distributed )
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd , env=self.get_env() )
        return output_dir
    def get_launcher( self , distributed=False ):
        '''simple docstring'''
        num_gpus = min(2 , get_gpu_count() ) if distributed else 1
        return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split() | 686 | 0 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
lowercase_ : Any = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    attention_dropout: Optional[float] = field(
        default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
    activation_dropout: Optional[float] = field(
        default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
    hidden_dropout: Optional[float] = field(
        default=0.1 , metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        } , )
    feat_proj_dropout: Optional[float] = field(
        default=0.1 , metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."} , )
    mask_time_prob: Optional[float] = field(
        default=0.05 , metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        } , )
    layerdrop: Optional[float] = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train+validation" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_val_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        } , )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class DataCollatorCTCWithPadding:
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None
    def __call__( self , features ) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{'''input_values''': feature['''input_values''']} for feature in features]
        label_features = [{'''input_ids''': feature['''labels''']} for feature in features]
        batch = self.processor.pad(
            input_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        labels_batch = self.processor.pad(
            labels=label_features , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
        batch['''labels'''] = labels
        return batch
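# Editor's note: hedged, standalone sketch of the -100 label-masking step performed by
# the collator above; -100 is the target index PyTorch loss functions ignore by default.
# The `_demo_*` names are illustrative only.
_demo_labels = torch.tensor([[5, 9, 0], [7, 0, 0]])
_demo_label_mask = torch.tensor([[1, 1, 0], [1, 0, 0]])
assert _demo_labels.masked_fill(_demo_label_mask.ne(1), -100).tolist() == [[5, 9, -100], [7, -100, -100]]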
class CTCTrainer ( Trainer ):
    def training_step( self , model , inputs ) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['''labels'''] >= 0).sum()
            else:
                raise ValueError(f'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        return loss.detach()
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE__: Optional[int]= HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE__: int= None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE__: Optional[Any]= get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , snake_case_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
SCREAMING_SNAKE_CASE__: Optional[int]= datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
SCREAMING_SNAKE_CASE__: str= datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
SCREAMING_SNAKE_CASE__: Optional[int]= F'[{"".join(data_args.chars_to_ignore )}]'
def remove_special_characters(snake_case_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE__: Any= re.sub(snake_case_ , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
SCREAMING_SNAKE_CASE__: int= train_dataset.map(snake_case_ , remove_columns=['''sentence'''] )
SCREAMING_SNAKE_CASE__: List[str]= eval_dataset.map(snake_case_ , remove_columns=['''sentence'''] )
def extract_all_chars(snake_case_ : Optional[int] ):
SCREAMING_SNAKE_CASE__: int= ''' '''.join(batch['''text'''] )
SCREAMING_SNAKE_CASE__: List[Any]= list(set(snake_case_ ) )
return {"vocab": [vocab], "all_text": [all_text]}
SCREAMING_SNAKE_CASE__: Tuple= train_dataset.map(
snake_case_ , batched=snake_case_ , batch_size=-1 , keep_in_memory=snake_case_ , remove_columns=train_dataset.column_names , )
SCREAMING_SNAKE_CASE__: int= train_dataset.map(
snake_case_ , batched=snake_case_ , batch_size=-1 , keep_in_memory=snake_case_ , remove_columns=eval_dataset.column_names , )
SCREAMING_SNAKE_CASE__: Optional[Any]= list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
SCREAMING_SNAKE_CASE__: Optional[Any]= {v: k for k, v in enumerate(snake_case_ )}
SCREAMING_SNAKE_CASE__: List[str]= vocab_dict[''' ''']
del vocab_dict[" "]
SCREAMING_SNAKE_CASE__: Optional[Any]= len(snake_case_ )
SCREAMING_SNAKE_CASE__: Dict= len(snake_case_ )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(snake_case_ , snake_case_ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        '''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0.0 , do_normalize=True , return_attention_mask=True )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE__: Any= min(len(snake_case_ ) , data_args.max_train_samples )
SCREAMING_SNAKE_CASE__: List[Any]= train_dataset.select(range(snake_case_ ) )
if data_args.max_val_samples is not None:
SCREAMING_SNAKE_CASE__: Dict= eval_dataset.select(range(data_args.max_val_samples ) )
SCREAMING_SNAKE_CASE__: Tuple= torchaudio.transforms.Resample(48_000 , 16_000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(snake_case_ : Optional[Any] ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= torchaudio.load(batch['''path'''] )
SCREAMING_SNAKE_CASE__: Dict= resampler(snake_case_ ).squeeze().numpy()
SCREAMING_SNAKE_CASE__: Any= 16_000
SCREAMING_SNAKE_CASE__: str= batch['''text''']
return batch
SCREAMING_SNAKE_CASE__: Optional[int]= train_dataset.map(
snake_case_ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
SCREAMING_SNAKE_CASE__: Tuple= eval_dataset.map(
snake_case_ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(snake_case_ : Dict ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), F'Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'
SCREAMING_SNAKE_CASE__: Tuple= processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(snake_case_ )
return batch
SCREAMING_SNAKE_CASE__: Union[str, Any]= train_dataset.map(
snake_case_ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=snake_case_ , num_proc=data_args.preprocessing_num_workers , )
SCREAMING_SNAKE_CASE__: str= eval_dataset.map(
snake_case_ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=snake_case_ , num_proc=data_args.preprocessing_num_workers , )
# Metric
SCREAMING_SNAKE_CASE__: str= datasets.load_metric('''wer''' )
def compute_metrics(snake_case_ : Any ):
SCREAMING_SNAKE_CASE__: Optional[Any]= pred.predictions
SCREAMING_SNAKE_CASE__: Any= np.argmax(snake_case_ , axis=-1 )
SCREAMING_SNAKE_CASE__: Any= processor.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__: Optional[Any]= processor.batch_decode(snake_case_ )
# we do not want to group tokens when computing the metrics
SCREAMING_SNAKE_CASE__: Dict= processor.batch_decode(pred.label_ids , group_tokens=snake_case_ )
SCREAMING_SNAKE_CASE__: List[Any]= wer_metric.compute(predictions=snake_case_ , references=snake_case_ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
SCREAMING_SNAKE_CASE__: List[Any]= DataCollatorCTCWithPadding(processor=snake_case_ , padding=snake_case_ )
# Initialize our Trainer
SCREAMING_SNAKE_CASE__: Optional[Any]= CTCTrainer(
model=snake_case_ , data_collator=snake_case_ , args=snake_case_ , compute_metrics=snake_case_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
SCREAMING_SNAKE_CASE__: Dict= last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
SCREAMING_SNAKE_CASE__: Any= model_args.model_name_or_path
else:
SCREAMING_SNAKE_CASE__: Optional[Any]= None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
SCREAMING_SNAKE_CASE__: Union[str, Any]= trainer.train(resume_from_checkpoint=snake_case_ )
trainer.save_model()
SCREAMING_SNAKE_CASE__: List[str]= train_result.metrics
SCREAMING_SNAKE_CASE__: Optional[int]= (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case_ )
)
SCREAMING_SNAKE_CASE__: Union[str, Any]= min(snake_case_ , len(snake_case_ ) )
trainer.log_metrics('''train''' , snake_case_ )
trainer.save_metrics('''train''' , snake_case_ )
trainer.save_state()
# Evaluation
SCREAMING_SNAKE_CASE__: Union[str, Any]= {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE__: Optional[int]= trainer.evaluate()
SCREAMING_SNAKE_CASE__: Dict= data_args.max_val_samples if data_args.max_val_samples is not None else len(snake_case_ )
SCREAMING_SNAKE_CASE__: Tuple= min(snake_case_ , len(snake_case_ ) )
trainer.log_metrics('''eval''' , snake_case_ )
trainer.save_metrics('''eval''' , snake_case_ )
return results
if __name__ == "__main__":
main()
| 64 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    '''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ernie'''] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
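# Editor's note: hedged, standalone sketch of the deferred-import idea behind
# `_LazyModule`: PEP 562 lets a module define __getattr__ so exported names are only
# imported on first access. This is a simplification, not the transformers implementation.
import importlib

def __getattr__(name):
    for _module_name, _names in _import_structure.items():
        if name in _names:
            return getattr(importlib.import_module(F'.{_module_name}', __name__), name)
    raise AttributeError(F'module {__name__!r} has no attribute {name!r}')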
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 0 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id , token=None ):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url , headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"&page={i + 2}" , headers=headers ).json()
            job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
        return job_links
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
    return {}
def get_artifacts_links(workflow_run_id , token=None ):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url , headers=headers ).json()
    artifacts = {}
    try:
        artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"&page={i + 2}" , headers=headers ).json()
            artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
        return artifacts
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
    return {}
def download_artifact(artifact_name , artifact_url , output_dir , token ):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False )
    download_url = result.headers["""Location"""]
    response = requests.get(download_url , allow_redirects=True )
    file_path = os.path.join(output_dir , F"{artifact_name}.zip" )
    with open(file_path , """wb""" ) as fp:
        fp.write(response.content )
def get_errors_from_single_artifact(artifact_zip_path , job_links=None ):
    '''simple docstring'''
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path ) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
                        for line in f:
                            line = line.decode("""UTF-8""" ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(""": """ )]
                                    error = line[line.index(""": """ ) + len(""": """ ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
                                # `test` is the test method that failed
                                test = line[len("""FAILED """ ) :]
                                failed_tests.append(test )
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors ) != len(failed_tests ):
        raise ValueError(
            F"`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` "
            F"and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            """ problem.""" )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name , None )
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
    return result
def get_all_errors(artifact_dir , job_links=None ):
    '''simple docstring'''
    errors = []
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if p.endswith(""".zip""" )]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links ) )
    return errors
def reduce_by_error(logs , error_filter=None ):
    '''simple docstring'''
    counter = Counter()
    counter.update([x[1] for x in logs] )
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True ) )
    return r
def get_model(test ):
    '''simple docstring'''
    test = test.split("""::""" )[0]
    if test.startswith("""tests/models/""" ):
        test = test.split("""/""" )[2]
    else:
        test = None
    return test
def reduce_by_model(logs , error_filter=None ):
    '''simple docstring'''
    logs = [(x[0], x[1], get_model(x[2] )) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test] )
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values() )
        if n_errors > 0:
            r[test] = {"""count""": n_errors, """errors""": error_counts}
    r = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True ) )
    return r
def make_github_table(reduced_by_error ):
    '''simple docstring'''
    header = """| no. | error | status |"""
    sep = """|-:|:-|:-|"""
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["""count"""]
        line = F"| {count} | {error[:100]} | |"
        lines.append(line )
    return "\n".join(lines )
def make_github_table_per_model(reduced_by_model ):
    '''simple docstring'''
    header = """| model | no. of errors | major error | count |"""
    sep = """|-:|-:|-:|-:|"""
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["""count"""]
        error, _count = list(reduced_by_model[model]["""errors"""].items() )[0]
        line = F"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line )
    return "\n".join(lines )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
__UpperCAmelCase = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
                index = k.find(' / ')
                k = k[index + len(' / ') :]
            job_links[k] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s2)
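# Editor's note: hedged, standalone sketch of the Counter-based reduction used above:
# logs are (error_line, error, failed_test) triples reduced to per-error counts.
# The `_demo_logs` data is illustrative only.
_demo_logs = [
    ('t.py:1', 'AssertionError', 'tests/models/bert/test_a.py::test_x'),
    ('t.py:2', 'AssertionError', 'tests/models/gpt2/test_b.py::test_y'),
    ('t.py:3', 'OSError', 'tests/models/bert/test_a.py::test_z'),
]
assert Counter(e for _, e, _ in _demo_logs).most_common() == [('AssertionError', 2), ('OSError', 1)]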
| 65 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n , prec=1_000 ) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
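# Editor's note: hedged, standalone sanity check — a compact Miller-Rabin using the
# built-in pow(base, exp, mod), validated against trial division on a small range.
# Illustrative only; not part of the module above.
def _trial_division(n):
    return n > 1 and all(n % p for p in range(2, int(n ** 0.5) + 1))

def _miller_rabin(n, rounds=20):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    d, exp = n - 1, 0
    while d % 2 == 0:
        d, exp = d // 2, exp + 1
    for _ in range(rounds):
        a = random.randrange(2, n)
        b = pow(a, d, n)
        if b in (1, n - 1):
            continue
        for _ in range(exp - 1):
            b = b * b % n
            if b == n - 1:
                break
        else:
            return False
    return True

assert all(_miller_rabin(i) == _trial_division(i) for i in range(2, 500))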
if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i))) | 686 | 0 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix , row: int , column: int , n: int ) -> bool:
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix ) -> Matrix | None:
    if location := find_empty_location(grid ):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix ) -> None:
    for row in grid:
        for cell in row:
            print(cell , end=' ' )
        print()
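# Editor's note: hedged, standalone sketch of the box-index arithmetic used in is_safe
# above: (row - row % 3, column - column % 3) is the top-left corner of the 3x3 box
# containing a cell. Names are illustrative only.
def box_corner(row: int, column: int) -> tuple[int, int]:
    return row - row % 3, column - column % 3

assert box_corner(4, 7) == (3, 6)
assert box_corner(0, 2) == (0, 0)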
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 66 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'''\s+''')
def get_hash(example ) -> Dict[str, str]:
    return {"hash": hashlib.md5(re.sub(PATTERN , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def line_stats(example ):
    line_lengths = [len(line ) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats(example ):
    alpha_frac = np.mean([c.isalnum() for c in example['content']] )
    return {"alpha_frac": alpha_frac}
def check_uniques(example , uniques ):
    if example["hash"] in uniques:
        uniques.remove(example['hash'] )
        return True
    else:
        return False
def is_autogenerated(example , scan_width=5 ):
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example , scan_width=5 , coeff=0.05 ):
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n' )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count('config' )
        count_test += line.lower().count('test' )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example ):
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example , minimum=4 ):
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=' )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example ):
    input_ids = tokenizer(example['content'] , truncation=False )['input_ids']
    ratio = len(example['content'] ) / len(input_ids )
    return {"ratio": ratio}
def preprocess(example ):
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def filter(example , uniques , args ):
    if not check_uniques(example , uniques ):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path ):
    with open(file_path , 'rb' ) as f_in:
        with gzip.open(str(file_path ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
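# Editor's note: hedged, standalone sketch of the exact-deduplication idea used by
# get_hash and check_uniques above: normalize whitespace, hash, keep the first
# occurrence per hash. The `_demo_*` names are illustrative only.
def _demo_content_hash(text):
    return hashlib.md5(re.sub(r'\s+', '', text).encode('utf-8')).hexdigest()

_demo_docs = ['def f(x):\n    return x', 'def f(x): return x', 'def g(): pass']
_demo_seen, _demo_unique = set(), []
for _doc in _demo_docs:
    _h = _demo_content_hash(_doc)
    if _h not in _demo_seen:
        _demo_seen.add(_h)
        _demo_unique.append(_doc)
assert len(_demo_unique) == 2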
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
    print(F'Size of deduplicate dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F'file-{file_number+1:012}.json')
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}') | 686 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode :
    """simple docstring"""
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(node: TreeNode | None ) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )
    if not is_valid_tree(node ):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.' )
    def is_binary_search_tree_recursive_check(
        node: TreeNode | None , left_bound: float , right_bound: float ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
    return is_binary_search_tree_recursive_check(node , -float('inf' ) , float('inf' ) )
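# Editor's note: hedged, standalone usage sketch for the checker above, assuming the
# TreeNode/is_binary_search_tree names restored in this file.
_valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
_invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
assert is_binary_search_tree(_valid)
assert not is_binary_search_tree(_invalid)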
if __name__ == "__main__":
import doctest
doctest.testmod() | 67 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : int = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "yolos"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
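# Editor's note: hedged, standalone sketch of the ViT-style patch arithmetic implied by
# the config defaults above (illustrative only; YOLOS additionally prepends detection
# tokens, which this toy count ignores).
_image_height, _image_width, _patch_size = 512, 864, 16
_num_patches = (_image_height // _patch_size) * (_image_width // _patch_size)
assert _num_patches == 32 * 54  # 1728 patch embeddings for a 512x864 input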
class YolosOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11" )
@property
    def inputs( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation( self ):
'''simple docstring'''
return 1E-4
@property
    def default_onnx_opset( self ):
'''simple docstring'''
return 12 | 686 | 0 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    """simple docstring"""
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    """simple docstring"""
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    """simple docstring"""
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    """simple docstring"""
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search(sorted_collection: list[int] , item: int ) -> int | None:
    """simple docstring"""
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list[int] , item: int ) -> int | None:
    """simple docstring"""
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int] , item: int , left: int , right: int ) -> int | None:
    """simple docstring"""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
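# Editor's note: hedged, standalone cross-check of the hand-rolled searches above
# against the standard library's bisect module. Data is illustrative only.
_demo_data = [0, 5, 7, 10, 15]
assert bisect.bisect_left(_demo_data, 7) == 2    # first insertion point for 7
assert bisect.bisect_right(_demo_data, 7) == 3   # insertion point after the existing 7
assert bisect.bisect_left(_demo_data, 6) == 2    # 6 would be inserted before 7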
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(F"""{target} was not found in {collection}.""")
    else:
        print(F"""{target} was found at position {result} in {collection}.""")
| 68 |
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    """Convolution + batch norm + optional activation, the basic ResNet building block."""

    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
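
# Illustrative shape check for the layer above (ours, never called at import
# time): stride 2 with `kernel_size // 2` padding halves the spatial resolution.
def _demo_conv_layer_shapes() -> None:
    layer = ResNetConvLayer(3, 64, kernel_size=7, stride=2)
    out = layer(torch.randn(1, 3, 224, 224))
    # floor((224 + 2*3 - 7) / 2) + 1 == 112
    assert out.shape == (1, 64, 112, 112)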
class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem), composed of a single aggressive convolution."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """Projects residual features to the right size and, if needed, downsamples the input."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer composed of two `3x3` convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer: the first `1x1` convolution reduces the channels by a factor of
    `reduction`, a `3x3` convolution does the spatial work, and a final `1x1` convolution restores the width.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()

        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer

        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading
    pretrained models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
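
# Hedged end-to-end sketch (ours): running the bare model against the checkpoint
# named in the docstring constants above. Requires network access; never called
# at import time.
def _demo_resnet_model() -> None:
    import requests
    from PIL import Image

    from transformers import AutoImageProcessor

    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
    model = ResNetModel.from_pretrained("microsoft/resnet-50")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # matches _EXPECTED_OUTPUT_SHAPE defined above
    assert list(outputs.last_hidden_state.shape) == _EXPECTED_OUTPUT_SHAPE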
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        r"""
        Returns:
            `BackboneOutput` with the feature maps of the requested stages.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        ) | 686 | 0 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Return the sum of an arithmetic progression via the closed-form formula."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
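
# Worked example (ours): first_term=1, common_diff=1, num_of_terms=10 gives
# (10 / 2) * (2 * 1 + 9 * 1) = 5 * 11 = 55, i.e. 1 + 2 + ... + 10.
def _check_example() -> None:
    assert sum_of_series(1, 1, 10) == 55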
| 69 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 686 | 0 |
import os
import unittest

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    BertTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
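
# Illustrative helper (ours, not part of the upstream suite): condenses the three
# `strip_accents` behaviours exercised above into one place.
def _demo_strip_accents() -> None:
    assert BasicTokenizer(do_lower_case=True, strip_accents=False).tokenize("HäLLo") == ["hällo"]
    assert BasicTokenizer(do_lower_case=True, strip_accents=True).tokenize("HäLLo") == ["hallo"]
    assert BasicTokenizer(do_lower_case=False, strip_accents=True).tokenize("HäLLo") == ["HaLLo"]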
| 70 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"]
    )

    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, src, dest):
    val = dct.pop(src)
    dct[dest] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
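
# Toy illustration (ours, not used by the script): the q/k/v split pattern used
# above slices a fused projection of width 3*h into three h-sized chunks.
def _demo_qkv_split() -> None:
    h = 4
    fused = torch.arange(3 * h * h, dtype=torch.float32).reshape(3 * h, h)
    q, k, v = fused[:h, :], fused[h : h * 2, :], fused[-h:, :]
    assert q.shape == k.shape == v.shape == (h, h)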
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 686 | 0 |
from ..utils import DummyObject, requires_backends


class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
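
# Hedged usage note (ours): instantiating either placeholder immediately raises
# an ImportError pointing the user at the missing `speech` extra, e.g.
# `pip install transformers[speech]`. The class names above are reconstructed
# from the public API gated behind that extra.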
| 71 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # collect decodable single-byte tokens and build a clean text/ids pair from them
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks

        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str) | 686 | 0 |
import copy
import re


class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback: disambiguate with a "#<suffix>" when every prefix collides.
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1  # advance the counter, otherwise a collision would loop forever
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
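# A minimal usage sketch (hypothetical subclass and parameter names; the exact
# short keys depend on collision order, so treat the printed values as examples):
class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 3e-5, "batch_size": 8, "warmup": True}


name = RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 8, "warmup": True})
print(name)  # e.g. "run_lr0.0001" -- only values that differ from DEFAULTS are encoded
print(RunNamer.parse_repr(name))  # round-trips back to a full parameter dict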
| 72 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    # Every expected MST edge must appear in the result, in either orientation.
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result | 686 | 0 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Element-wise logistic sigmoid, 1 / (1 + e^-x).

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    """
    Swish (SiLU) activation, x * sigmoid(x).

    >>> swish(np.array([0.0]))
    array([0.])
    """
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
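# Quick numeric check of the helpers above: swish approaches x for large
# positive inputs and 0 for large negative inputs.
x = np.array([-1.0, 0.0, 1.0])
print(swish(x))  # ~[-0.26894142, 0.0, 0.73105858]
print(np.allclose(swish(x), x / (1 + np.exp(-x))))  # True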
| 73 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"]) | 686 | 0 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase_ = logging.get_logger(__name__)
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = ['''input_values''', '''attention_mask''']
def __init__( self : Tuple , _A : int = 1 , _A : int = 1_6000 , _A : float = 0.0 , _A : bool = False , _A : int = 80 , _A : int = 16 , _A : int = 64 , _A : str = "hann_window" , _A : float = 1.0 , _A : float = 80 , _A : float = 7600 , _A : float = 1e-10 , _A : int = 2 , _A : bool = True , **_A : Optional[Any] , ):
"""simple docstring"""
super().__init__(feature_size=_A , sampling_rate=_A , padding_value=_A , **_A )
__SCREAMING_SNAKE_CASE : List[Any] = do_normalize
__SCREAMING_SNAKE_CASE : Optional[Any] = return_attention_mask
__SCREAMING_SNAKE_CASE : Optional[Any] = num_mel_bins
__SCREAMING_SNAKE_CASE : Dict = hop_length
__SCREAMING_SNAKE_CASE : Any = win_length
__SCREAMING_SNAKE_CASE : Union[str, Any] = win_function
__SCREAMING_SNAKE_CASE : str = frame_signal_scale
__SCREAMING_SNAKE_CASE : Tuple = fmin
__SCREAMING_SNAKE_CASE : Any = fmax
__SCREAMING_SNAKE_CASE : Dict = mel_floor
__SCREAMING_SNAKE_CASE : Union[str, Any] = reduction_factor
__SCREAMING_SNAKE_CASE : List[str] = win_length * sampling_rate // 1000
__SCREAMING_SNAKE_CASE : List[Any] = hop_length * sampling_rate // 1000
__SCREAMING_SNAKE_CASE : Union[str, Any] = optimal_fft_length(self.sample_size )
__SCREAMING_SNAKE_CASE : str = (self.n_fft // 2) + 1
__SCREAMING_SNAKE_CASE : Optional[int] = window_function(window_length=self.sample_size , name=self.win_function , periodic=_A )
__SCREAMING_SNAKE_CASE : Optional[int] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
if frame_signal_scale != 1.0:
warnings.warn(
'''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , _A , )
if reduction_factor != 2.0:
warnings.warn(
'''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , _A , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCAmelCase__ ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = np.array(_A , np.intaa )
__SCREAMING_SNAKE_CASE : List[Any] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
__SCREAMING_SNAKE_CASE : Tuple = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
__SCREAMING_SNAKE_CASE : Any = padding_value
normed_input_values.append(_A )
else:
__SCREAMING_SNAKE_CASE : int = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def UpperCAmelCase__ ( self : Any , _A : np.ndarray , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = spectrogram(
_A , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
return log_mel_spec.T
def __call__( self : Dict , _A : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _A : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _A : Union[bool, str, PaddingStrategy] = False , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[bool] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[int] = None , **_A : str , ):
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if audio is not None:
__SCREAMING_SNAKE_CASE : str = self._process_audio(
_A , _A , _A , _A , _A , _A , _A , _A , **_A , )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if audio_target is not None:
__SCREAMING_SNAKE_CASE : List[Any] = self._process_audio(
_A , _A , _A , _A , _A , _A , _A , _A , **_A , )
if inputs is None:
return inputs_target
else:
__SCREAMING_SNAKE_CASE : str = inputs_target['''input_values''']
__SCREAMING_SNAKE_CASE : Dict = inputs_target.get('''attention_mask''' )
if decoder_attention_mask is not None:
__SCREAMING_SNAKE_CASE : Tuple = decoder_attention_mask
return inputs
def UpperCAmelCase__ ( self : Tuple , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = False , _A : Union[bool, str, PaddingStrategy] = False , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[bool] = None , _A : Optional[Union[str, TensorType]] = None , **_A : str , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = isinstance(_A , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
__SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__SCREAMING_SNAKE_CASE : Tuple = [np.asarray(_A , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
__SCREAMING_SNAKE_CASE : Any = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE : Tuple = speech.astype(np.floataa )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE : Optional[int] = [speech]
# needed to make pad() work on spectrogram inputs
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_size
# convert into correct format for padding
if is_target:
__SCREAMING_SNAKE_CASE : Tuple = [self._extract_mel_features(_A ) for waveform in speech]
__SCREAMING_SNAKE_CASE : Tuple = BatchFeature({'''input_values''': features} )
__SCREAMING_SNAKE_CASE : Any = self.num_mel_bins
else:
__SCREAMING_SNAKE_CASE : Dict = BatchFeature({'''input_values''': speech} )
__SCREAMING_SNAKE_CASE : Dict = self.pad(
_A , padding=_A , max_length=_A , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=_A , **_A , )
__SCREAMING_SNAKE_CASE : List[Any] = feature_size_hack
# convert input values to correct format
__SCREAMING_SNAKE_CASE : str = padded_inputs['''input_values''']
if not isinstance(input_values[0] , np.ndarray ):
__SCREAMING_SNAKE_CASE : Any = [np.asarray(_A , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_A , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
__SCREAMING_SNAKE_CASE : List[Any] = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_A , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE : Any = input_values.astype(np.floataa )
# convert attention_mask to correct format
__SCREAMING_SNAKE_CASE : List[str] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(_A , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
__SCREAMING_SNAKE_CASE : Optional[Any] = (
attention_mask
if self._get_padding_strategies(_A , max_length=_A ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__SCREAMING_SNAKE_CASE : List[str] = self.zero_mean_unit_var_norm(
padded_inputs['''input_values'''] , attention_mask=_A , padding_value=self.padding_value )
if return_tensors is not None:
__SCREAMING_SNAKE_CASE : str = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = super().to_dict()
# Don't serialize these as they are derived from the other properties.
__SCREAMING_SNAKE_CASE : int = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
for name in names:
if name in output:
del output[name]
return output
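# A usage sketch, assuming the Hugging Face `SpeechT5FeatureExtractor` API that
# the class above mirrors ("microsoft/speecht5_tts" is the usual checkpoint):
if __name__ == "__main__":
    import numpy as np
    from transformers import SpeechT5FeatureExtractor

    extractor = SpeechT5FeatureExtractor.from_pretrained("microsoft/speecht5_tts")
    waveform = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
    inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
    print(inputs["input_values"].shape)  # raw-audio path keeps the waveform: (1, 16000)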
| 74 |
import pytest
import requests
from datasets.utils.file_utils import OfflineModeIsEnabled, http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(OfflineModeIsEnabled):
            http_head("https://huggingface.co") | 686 | 0 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        # `sql` and `con` are already captured by the constructor, so they must not
        # be forwarded to `DataFrame.to_sql` a second time.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
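# A minimal round-trip sketch, assuming an in-memory SQLite database;
# `Dataset.to_sql` / `Dataset.from_sql` wrap the writer and reader above.
if __name__ == "__main__":
    import sqlite3

    from datasets import Dataset

    con = sqlite3.connect(":memory:")
    Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]}).to_sql("train", con)
    ds = Dataset.from_sql("SELECT text, label FROM train", con)
    print(ds[0])  # {'text': 'a', 'label': 0}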
| 75 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCamelCase : Optional[int] = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_lowerCamelCase : List[str] = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_lowerCamelCase : Dict = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result | 686 | 0 |
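# rougeLsum splits on "\n", so multi-sentence texts should be newline-joined
# before scoring (see the issue linked in the docstring above); a quick check:
if __name__ == "__main__":
    import datasets

    rouge = datasets.load_metric("rouge")
    predictions = ["hello there\ngeneral kenobi"]
    references = ["hello there\ngeneral kenobi"]
    results = rouge.compute(predictions=predictions, references=references)
    print(results["rougeLsum"].mid.fmeasure)  # 1.0 for an exact match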
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce the amount of console output from TF
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 76 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler | 686 | 0 |
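# Sketch: a 4x-upscaling variant of the config above; `upscale` and `upsampler`
# configure the reconstruction head, the remaining arguments the Swin backbone.
config = Swin2SRConfig(upscale=4, upsampler="pixelshuffle")
print(config.embed_dim, config.num_layers)  # 180 6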
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 77 |
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    """Lomuto-style partition around a[left_index]; returns the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main() | 686 | 0 |
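# Property check for the randomized quicksort above: the in-place result must
# agree with the built-in sorted().
import random

data = [random.randint(0, 100) for _ in range(50)]
expected = sorted(data)
quick_sort_random(data, 0, len(data))
assert data == expected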
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 78 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """
    Project Euler 58: return the side length of the square spiral at which the
    ratio of primes along both diagonals first falls below `ratio`.
    """
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        # Test the three new non-square corners of the next spiral layer.
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 686 | 0 |
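# The range() in solution() enumerates the three non-square diagonal corners of
# each new layer; for the side-5 and side-7 layers these are:
corners = [n for j in range(3, 7, 2) for n in range(j * j + j + 1, (j + 2) * (j + 2), j + 1)]
print(corners)  # [13, 17, 21, 31, 37, 43]
print(sum(is_prime(n) for n in corners))  # 5 of the 6 are prime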
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _lowerCamelCase ( __lowerCamelCase ) -> Any:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = 384
if "tiny" in model_name:
UpperCAmelCase__ : Dict = [3, 3, 9, 3]
UpperCAmelCase__ : int = [96, 192, 384, 768]
if "small" in model_name:
UpperCAmelCase__ : Optional[int] = [3, 3, 27, 3]
UpperCAmelCase__ : Dict = [96, 192, 384, 768]
if "base" in model_name:
UpperCAmelCase__ : str = [3, 3, 27, 3]
UpperCAmelCase__ : Optional[Any] = [128, 256, 512, 1024]
UpperCAmelCase__ : int = 512
if "large" in model_name:
UpperCAmelCase__ : List[str] = [3, 3, 27, 3]
UpperCAmelCase__ : Tuple = [192, 384, 768, 1536]
UpperCAmelCase__ : Any = 768
if "xlarge" in model_name:
UpperCAmelCase__ : int = [3, 3, 27, 3]
UpperCAmelCase__ : int = [256, 512, 1024, 2048]
UpperCAmelCase__ : int = 1024
# set label information
UpperCAmelCase__ : Tuple = 150
UpperCAmelCase__ : int = """huggingface/label-files"""
UpperCAmelCase__ : Tuple = """ade20k-id2label.json"""
UpperCAmelCase__ : int = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase__ : Any = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
UpperCAmelCase__ : Dict = {v: k for k, v in idalabel.items()}
UpperCAmelCase__ : Optional[int] = ConvNextConfig(
depths=__lowerCamelCase , hidden_sizes=__lowerCamelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
UpperCAmelCase__ : str = UperNetConfig(
backbone_config=__lowerCamelCase , auxiliary_in_channels=__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase , )
return config
def _lowerCamelCase ( __lowerCamelCase ) -> List[str]:
'''simple docstring'''
UpperCAmelCase__ : Dict = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
'''simple docstring'''
UpperCAmelCase__ : str = dct.pop(__lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = val
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
UpperCAmelCase__ : List[Any] = model_name_to_url[model_name]
UpperCAmelCase__ : List[Any] = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location="""cpu""" )["""state_dict"""]
UpperCAmelCase__ : Dict = get_upernet_config(__lowerCamelCase )
UpperCAmelCase__ : int = UperNetForSemanticSegmentation(__lowerCamelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
UpperCAmelCase__ : Optional[Any] = state_dict.pop(__lowerCamelCase )
if "bn" in key:
UpperCAmelCase__ : int = key.replace("""bn""" , """batch_norm""" )
UpperCAmelCase__ : str = val
# rename keys
UpperCAmelCase__ : Optional[Any] = create_rename_keys(__lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
# verify on image
UpperCAmelCase__ : Tuple = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
UpperCAmelCase__ : Tuple = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert("""RGB""" )
UpperCAmelCase__ : Dict = SegformerImageProcessor()
UpperCAmelCase__ : Union[str, Any] = processor(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
UpperCAmelCase__ : Any = model(__lowerCamelCase )
if model_name == "upernet-convnext-tiny":
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
elif model_name == "upernet-convnext-small":
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
elif model_name == "upernet-convnext-base":
UpperCAmelCase__ : int = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
elif model_name == "upernet-convnext-large":
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
elif model_name == "upernet-convnext-xlarge":
UpperCAmelCase__ : Union[str, Any] = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowerCamelCase , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__lowerCamelCase )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print(F"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(F"openmmlab/{model_name}" )
processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 79 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3 | 686 | 0 |
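# Sketch of backbone-style usage: out_features/out_indices are validated and
# aligned against stage_names, which downstream heads use to pick feature maps.
config = ResNetConfig(depths=[2, 2, 2, 2], layer_type="basic", out_features=["stage2", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']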
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 80 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[Any] , lowercase : Union[str, Any] , lowercase : int ):
'''simple docstring'''
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(lowercase ) for s in shape] )}.npy'''
def A ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def A ( self : List[Any] , lowercase : Tuple=0 , lowercase : Optional[int]=(4, 4, 64, 64) , lowercase : Optional[int]=False ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) , dtype=lowercase )
return image
def A ( self : Tuple , lowercase : Any=False , lowercase : Union[str, Any]="CompVis/stable-diffusion-v1-4" ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = 'bf16' if fpaa else None
_snake_case , _snake_case = FlaxUNetaDConditionModel.from_pretrained(
lowercase , subfolder='unet' , dtype=lowercase , revision=lowercase )
return model, params
def A ( self : Union[str, Any] , lowercase : str=0 , lowercase : Optional[Any]=(4, 77, 768) , lowercase : int=False ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) , dtype=lowercase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def A ( self : Tuple , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=lowercase )
_snake_case = self.get_latents(lowercase , fpaa=lowercase )
_snake_case = self.get_encoder_hidden_states(lowercase , fpaa=lowercase )
_snake_case = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
_snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case = jnp.array(lowercase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def A ( self : str , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : List[str] ):
'''simple docstring'''
_snake_case , _snake_case = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=lowercase )
_snake_case = self.get_latents(lowercase , shape=(4, 4, 96, 96) , fpaa=lowercase )
_snake_case = self.get_encoder_hidden_states(lowercase , shape=(4, 77, 1_024) , fpaa=lowercase )
_snake_case = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
_snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case = jnp.array(lowercase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1E-2 ) | 686 | 0 |
from __future__ import annotations
def all_unique(collection: list) -> bool:
    """
    Return True when every element in the collection is distinct.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
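# An equivalent early-exit variant (a hypothetical helper, not part of the
# module above) that stops at the first duplicate instead of building the
# whole set up front:
def has_duplicates(collection) -> bool:
    seen = set()
    for item in collection:
        if item in seen:
            return True
        seen.add(item)
    return False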
| 81 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # Note: writing `if "fc2" and "experts" not in key:` would always reduce to
        # `"experts" not in key` because the bare string literal is truthy.
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def a_ ( __lowercase : Optional[Any] , __lowercase : Tuple , __lowercase : Any , __lowercase : List[str] , __lowercase : str = WEIGHTS_NAME ) -> Union[str, Any]:
_snake_case = []
_snake_case = 0
os.makedirs(__lowercase , exist_ok=__lowercase )
for expert in range(__lowercase ):
_snake_case = switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(__lowercase ):
_snake_case = torch.load(__lowercase )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = os.path.join(
__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
torch.save(__lowercase , __lowercase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__lowercase )[0]].dtype )
# Add the last block
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
_snake_case = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = shared_weights['decoder.embed_tokens.weight']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__lowercase ) == 1:
_snake_case = os.path.join(__lowercase , __lowercase )
torch.save(__lowercase , __lowercase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__lowercase , __lowercase )
# Otherwise, let's build the index
_snake_case = {}
for idx, shard in enumerate(__lowercase ):
_snake_case = weights_name.replace('.bin' , f'''-{idx+1:05d}-of-{len(__lowercase ):05d}.bin''' )
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__lowercase , os.path.join(__lowercase , __lowercase ) )
for key in shard:
_snake_case = shard_file
# Add the metadata
_snake_case = {'total_size': total_size}
_snake_case = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(__lowercase , __lowercase ) , 'w' , encoding='utf-8' ) as f:
_snake_case = json.dumps(__lowercase , indent=2 , sort_keys=__lowercase ) + '\n'
f.write(__lowercase )
return metadata, index
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_lowerCamelCase : List[str] = parser.parse_args()
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_lowerCamelCase : Tuple = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_lowerCamelCase : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path) | 686 | 0 |
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 82 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_lowerCamelCase : List[Any] = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_lowerCamelCase : Any = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_lowerCamelCase : Union[str, Any] = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
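

# Worked example (illustrative numpy inputs): with preds = np.array([0, 1, 1, 0])
# and labels = np.array([0, 1, 0, 0]), acc_and_f1(preds, labels) returns
# {"accuracy": 0.75, "f1": 0.666...}: one of the two positive predictions is a
# false positive, so precision is 0.5 while recall is 1.0.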
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(lowercase , lowercase )}
elif self.config_name == "stsb":
return pearson_and_spearman(lowercase , lowercase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(lowercase , lowercase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(lowercase , lowercase )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) | 686 | 0 |
"""simple docstring"""
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's condition: f(a) and f(b) must have opposite signs
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    print(bisection(-2, 5))
    print(bisection(0, 6))
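
    # Convergence sketch (assumes the fixed 0.01 tolerance above): the bracket
    # halves each iteration, so the loop runs about ceil(log2((b - a) / 0.01)) times.
    from math import ceil, log2

    print(ceil(log2((5 - (-2)) / 0.01)))  # ~10 iterations for bisection(-2, 5)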
| 83 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_lowerCamelCase : Dict = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):

    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)
    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.hparams
_snake_case = processors[args.task]()
_snake_case = processor.get_labels()
for mode in ["train", "dev"]:
_snake_case = self._feature_file(lowercase )
if os.path.exists(lowercase ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , lowercase )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_snake_case = convert_examples_to_features(
lowercase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , lowercase )
torch.save(lowercase , lowercase )
def A ( self : Dict , lowercase : str , lowercase : int , lowercase : bool = False ):
'''simple docstring'''
_snake_case = 'dev' if mode == 'test' else mode
_snake_case = self._feature_file(lowercase )
logger.info('Loading features from cached file %s' , lowercase )
_snake_case = torch.load(lowercase )
_snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(lowercase , lowercase , lowercase , lowercase ) , batch_size=lowercase , shuffle=lowercase , )
def A ( self : str , lowercase : Optional[Any] , lowercase : str ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case , _snake_case = outputs[:2]
_snake_case = logits.detach().cpu().numpy()
_snake_case = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : int , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_snake_case = np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_snake_case = np.argmax(lowercase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_snake_case = np.squeeze(lowercase )
_snake_case = np.concatenate([x['target'] for x in outputs] , axis=0 )
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , lowercase , lowercase )}
_snake_case = dict(results.items() )
_snake_case = results
return ret, preds_list, out_label_list
def A ( self : int , lowercase : list ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : List[str] , lowercase : Any ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task", default="", type=str, required=True, help="The GLUE task to run"
        )
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none"
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
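

# Illustrative invocation (hypothetical flag values; the full flag set comes from
# add_generic_args and lightning_base):
#   python run_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --do_train --gpus 1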
if __name__ == "__main__":
main() | 686 | 0 |
from __future__ import annotations
class IIRFilter:
    """N-order IIR digital filter, implemented in direct form I."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set the filter coefficients; a_0 defaults to 1.0 if omitted."""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n] with the direct form I difference equation."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # Shift the histories and store the newest sample/output at the front.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result

        return result
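

if __name__ == "__main__":
    # Minimal usage sketch (assumes the class above; the second-order coefficient
    # values are illustrative, not a designed filter).
    filt = IIRFilter(2)
    filt.set_coefficients(
        [1.0, -1.1430, 0.4128],  # a_0 .. a_2 (feedback)
        [0.0675, 0.1349, 0.0675],  # b_0 .. b_2 (feedforward)
    )
    step_input = [0.0, 0.0] + [1.0] * 8
    print([round(filt.process(sample), 4) for sample in step_input])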
| 84 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = LEDConfig
_UpperCAmelCase : int = {}
_UpperCAmelCase : List[str] = "gelu"
def __init__( self : Union[str, Any] , lowercase : Optional[int] , lowercase : Dict=13 , lowercase : Dict=7 , lowercase : Tuple=True , lowercase : Dict=False , lowercase : Dict=99 , lowercase : Any=32 , lowercase : List[Any]=2 , lowercase : List[str]=4 , lowercase : List[str]=37 , lowercase : Dict=0.1 , lowercase : int=0.1 , lowercase : List[Any]=20 , lowercase : int=2 , lowercase : Optional[Any]=1 , lowercase : List[str]=0 , lowercase : Optional[int]=4 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = eos_token_id
_snake_case = pad_token_id
_snake_case = bos_token_id
_snake_case = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case = prepare_led_inputs_dict(lowercase , lowercase , lowercase )
_snake_case = tf.concat(
[tf.zeros_like(lowercase )[:, :-1], tf.ones_like(lowercase )[:, -1:]] , axis=-1 , )
_snake_case = global_attention_mask
return config, inputs_dict
def A ( self : str , lowercase : str , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = TFLEDModel(config=lowercase ).get_decoder()
_snake_case = inputs_dict['input_ids']
_snake_case = input_ids[:1, :]
_snake_case = inputs_dict['attention_mask'][:1, :]
_snake_case = 1
# first forward pass
_snake_case = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
_snake_case , _snake_case = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
_snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case = model(lowercase , attention_mask=lowercase )[0]
_snake_case = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case = output_from_no_past[:, -3:, random_slice_idx]
_snake_case = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_UpperCAmelCase : Optional[int] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase : Tuple = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : str = True
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : str = False
_UpperCAmelCase : List[Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = TFLEDModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = tf.zeros_like(inputs_dict['attention_mask'] )
_snake_case = 2
_snake_case = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
_snake_case = True
_snake_case = self.model_tester.seq_length
_snake_case = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase : List[str] ):
_snake_case = outputs.decoder_attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase : List[str] ):
_snake_case = [t.numpy() for t in outputs.encoder_attentions]
_snake_case = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
_snake_case = len(lowercase )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
if self.is_encoder_decoder:
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_decoder_attentions_output(lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase ) )
self.assertEqual(model.config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def A ( self : List[Any] ):
'''simple docstring'''
pass
def A ( self : Any ):
'''simple docstring'''
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
_lowerCamelCase : List[Any] = 1E-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 )
def A ( self : str ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 , rtol=1E-3 ) | 686 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class snake_case ( unittest.TestCase ):
def __lowercase( self : Dict , a_ : Optional[int] )-> Dict:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(a_ )
def __lowercase( self : Optional[int] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = 'sshleifer/tiny-gpt2'
SCREAMING_SNAKE_CASE__ : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
SCREAMING_SNAKE_CASE__ : List[Any] = PyTorchBenchmark(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowercase( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = 'sgugger/tiny-distilbert-classification'
SCREAMING_SNAKE_CASE__ : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , )
SCREAMING_SNAKE_CASE__ : Tuple = PyTorchBenchmark(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowercase( self : Optional[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = 'sshleifer/tiny-gpt2'
SCREAMING_SNAKE_CASE__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , torchscript=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
SCREAMING_SNAKE_CASE__ : List[Any] = PyTorchBenchmark(a_ )
SCREAMING_SNAKE_CASE__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def __lowercase( self : List[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = 'sshleifer/tiny-gpt2'
SCREAMING_SNAKE_CASE__ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , fpaa=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
SCREAMING_SNAKE_CASE__ : Tuple = PyTorchBenchmark(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowercase( self : Any )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = 'sshleifer/tiny-gpt2'
SCREAMING_SNAKE_CASE__ : Tuple = AutoConfig.from_pretrained(a_ )
# set architectures equal to `None`
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
SCREAMING_SNAKE_CASE__ : List[Any] = PyTorchBenchmark(a_ , configs=[config] )
SCREAMING_SNAKE_CASE__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowercase( self : int )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = 'sshleifer/tiny-gpt2'
SCREAMING_SNAKE_CASE__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
SCREAMING_SNAKE_CASE__ : Optional[int] = PyTorchBenchmark(a_ )
SCREAMING_SNAKE_CASE__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def __lowercase( self : Any )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = 'sshleifer/tiny-gpt2'
SCREAMING_SNAKE_CASE__ : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=a_ , multi_process=a_ , )
SCREAMING_SNAKE_CASE__ : List[Any] = PyTorchBenchmark(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowercase( self : str )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = 'sshleifer/tiny-gpt2'
SCREAMING_SNAKE_CASE__ : List[str] = AutoConfig.from_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
SCREAMING_SNAKE_CASE__ : Tuple = PyTorchBenchmark(a_ , configs=[config] )
SCREAMING_SNAKE_CASE__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowercase( self : List[Any] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = 'sshleifer/tinier_bart'
SCREAMING_SNAKE_CASE__ : Tuple = AutoConfig.from_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PyTorchBenchmark(a_ , configs=[config] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowercase( self : Dict )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = 'sshleifer/tiny-gpt2'
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoConfig.from_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
SCREAMING_SNAKE_CASE__ : List[str] = PyTorchBenchmark(a_ , configs=[config] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowercase( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = 'sshleifer/tinier_bart'
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoConfig.from_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , )
SCREAMING_SNAKE_CASE__ : Tuple = PyTorchBenchmark(a_ , configs=[config] )
SCREAMING_SNAKE_CASE__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(a_ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(a_ , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(a_ , 'train_time.csv' ) , env_info_csv_file=os.path.join(a_ , 'env.csv' ) , multi_process=a_ , )
SCREAMING_SNAKE_CASE__ : str = PyTorchBenchmark(a_ )
benchmark.run()
self.assertTrue(Path(os.path.join(a_ , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a_ , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a_ , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a_ , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a_ , 'env.csv' ) ).exists() )
def __lowercase( self : List[str] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(a_ : Tuple ):
self.assertTrue(hasattr(a_ , 'sequential' ) )
self.assertTrue(hasattr(a_ , 'cumulative' ) )
self.assertTrue(hasattr(a_ , 'current' ) )
self.assertTrue(hasattr(a_ , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , 'log.txt' ) , log_print=a_ , trace_memory_line_by_line=a_ , multi_process=a_ , )
SCREAMING_SNAKE_CASE__ : int = PyTorchBenchmark(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(a_ , 'log.txt' ) ).exists() )
| 85 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"

stages = [ZERO2, ZERO3]
def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
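# e.g. with param.args == ("zero2", "base"), a test named test_fp32_non_distributed
# is reported as the sub-test "test_fp32_non_distributed_zero2_base".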
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False,
        )

    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True,
        )

    def do_checks(self, output_dir):
        # the trained artifacts aren't inspected yet; success == the subprocess didn't fail
        pass
    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        fp16: bool = True,
        quality_checks: bool = True,
    ):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split() | 686 | 0 |
import os
def solution():
    """Find the greatest product of four adjacent numbers (in any direction)
    in the 20x20 grid stored in grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

        maximum = 0

        # right
        for i in range(20):
            for j in range(17):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp

        # down
        for i in range(17):
            for j in range(20):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp

        # diagonal 1
        for i in range(17):
            for j in range(17):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp

        # diagonal 2
        for i in range(17):
            for j in range(3, 20):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum
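

# Equivalent single-scan sketch (an alternative formulation, not from the original
# file): walk all four directions with offset vectors instead of four loops.
def solution_with_offsets() -> int:
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = [[int(x) for x in f.readline().split()] for _ in range(20)]
    best = 0
    for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for i in range(20):
            for j in range(20):
                # keep only windows whose fourth cell stays inside the grid
                if 0 <= i + 3 * di < 20 and 0 <= j + 3 * dj < 20:
                    product = 1
                    for k in range(4):
                        product *= grid[i + k * di][j + k * dj]
                    best = max(best, product)
    return best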
if __name__ == "__main__":
print(solution()) | 86 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 0 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
'''simple docstring'''
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
| 87 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1_000):
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
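

# Worked decomposition (for intuition): with n = 561, n - 1 = 560 = 35 * 2**4,
# so d = 35 and exp = 4; each random witness a is first raised to a**35 % 561.
#
# Stand-in sketch in case the local binary_exp_mod module is unavailable
# (assumption: bin_exp_mod(a, n, m) computes a**n % m by binary exponentiation,
# matching how it is called above):
def _bin_exp_mod_sketch(a, n, m):
    result = 1
    a %= m
    while n > 0:
        if n & 1:  # fold in the current square when this bit of n is set
            result = result * a % m
        a = a * a % m
        n >>= 1
    return result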
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i))) | 686 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 88 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculates the fraction of alphanumeric characters."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1. looking for keywords in the first few lines of the file;
    2. counting occurrences of 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
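

# Quick check of one heuristic (illustrative input): for a file of three short
# lines, line_stats({"content": "a\nbb\nccc"}) -> {"line_mean": 2.0, "line_max": 3}.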
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}') | 686 | 0 |
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for the LRU cache."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double linked list built specifically for the LRU cache."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Add the given node to the end of the list (before rear)."""
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(
        self, node: DoubleLinkedListNode[T, U]
    ) -> DoubleLinkedListNode[T, U] | None:
        """Remove and return the given node; return None if it is not connected."""
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU cache that stores up to a given capacity of entries."""

    # class variable that maps decorated functions to their cache instances
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the value for key (bumping it to most-recent), or None on a miss."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Set the value for key, evicting the least recently used entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0;
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(
        cls, size: int = 128
    ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of the LRU cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
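
    # Usage sketch (assumes the LRUCache above): memoize a recursive function via
    # the class-level decorator and inspect hit/miss statistics.
    @LRUCache.decorator(100)
    def fib(num: int) -> int:
        if num in (1, 2):
            return 1
        return fib(num - 1) + fib(num - 2)

    for i in range(1, 100):
        fib(i)
    print(fib.cache_info())  # e.g. CacheInfo(hits=194, misses=99, capacity=100, ...)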
| 89 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a YOLOS model."""

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1E-4

    @property
    def default_onnx_opset(self) -> int:
        return 12

| 686 | 0 |
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 1_01):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i) -> int:
        return i
class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(1_20, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
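# DummyModel simply echoes its input_ids back as "predictions" (with a dummy
# zero loss), so a correct distributed evaluation must reproduce the dataset
# indices 0..N-1 in order.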
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""--output_dir {output_dir}""".split()
        cmd = ['''torchrun'''] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""--output_dir {output_dir}""".split()
        cmd = ['''torchrun'''] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        f"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    '''Predictions and/or labels do not match expected results:\n  - predictions: '''
                    f"""{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}""" )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None

| 90 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = '''ResNetConfig'''

# Base docstring
_CHECKPOINT_FOR_DOC = '''microsoft/resnet-50'''
_EXPECTED_OUTPUT_SHAPE = [1, 2_048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''microsoft/resnet-50'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tiger cat'''

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    '''microsoft/resnet-50''',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """
    ResNet embeddings (stem): an aggressive 7x7 convolution with stride 2 followed
    by a 3x3 max-pool with stride 2, for a 4x spatial downsampling overall.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """
    ResNet shortcut, used to project the residual features to the correct size.
    If needed, it also downsamples the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """
    A classic ResNet residual layer composed of two 3x3 convolutions.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
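# The bottleneck variant below shrinks the channel count by `reduction`
# (4 by default) with a 1x1 convolution before the 3x3 convolution, then
# expands it back with a final 1x1 convolution, keeping deep networks cheap.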
class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer composed of three convolutions.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """
    A ResNet stage composed of stacked layers.
    """

    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ) )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state, hidden_states=hidden_states, )
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for
    downloading and loading pretrained models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'''
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''

RESNET_INPUTS_DOCSTRING = r'''
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.", RESNET_START_DOCSTRING, )
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
@add_start_docstrings(
    "\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ", RESNET_START_DOCSTRING, )
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(), )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    "\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ", RESNET_START_DOCSTRING, )
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        r"""
        Returns:
            The feature maps of the requested stages.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, )

| 686 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
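# For example, with the default scale_factor=8 a 768x768 request maps to a
# 96x96 latent grid (768 // 8**2 * 8 = 96); sizes that are not multiples of
# 64 are rounded up to the next valid latent size.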
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert('RGB'))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for image-to-image generation using Kandinsky 2.2.
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
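    # e.g. with num_inference_steps=100 and strength=0.3, only the final 30
    # scheduler steps are run, so the output stays close to the input image.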
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}' )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                    f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents

        return latents
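    # The input is encoded to MoVQ latents and then noised up to the starting
    # timestep selected by `strength`, so denoising begins from a partially
    # noised version of the original image rather than from pure noise.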
    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')

        device = torch.device(f'cuda:{gpu_id}')

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, keeping a single model on GPU at a time.
        """
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')

        device = torch.device(f'cuda:{gpu_id}')

        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f'Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor' )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)['latents']
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}')

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

| 91 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
        '''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''FocalNetForImageClassification''',
        '''FocalNetForMaskedImageModeling''',
        '''FocalNetBackbone''',
        '''FocalNetModel''',
        '''FocalNetPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)

| 686 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()

    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)

    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
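# Note: the host-to-device copies, the kernel launch and the device-to-host
# copies above are all enqueued on a single CUDA stream, so stream.synchronize()
# bounds the measured per-batch latency end to end.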
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
        f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='''only_second''' if pad_on_right else '''only_first''',
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='''max_length''',
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('''overflow_to_sample_mapping''')

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples['''input_ids'''])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples['''id'''][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['''offset_mapping'''][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]

    references = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
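    # The output buffers above are page-locked (pinned) host memory, which is
    # required for the asynchronous device-to-host copies in model_infer.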
    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f'''  Num examples = {len(eval_dataset)}''')
    logger.info(f'''  Batch size = {args.per_device_eval_batch_size}''')

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f'''Evaluation metrics: {eval_metric}''')
| 92 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=['stage2', 'stage3', 'stage4'], )

    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2_048, decoder_ffn_dim=2_048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )

    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys


def rename_key(dct, old, new):
    # move the value stored under the old key to the new key
    val = dct.pop(old)
    dct[new] = val

def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            # NOTE: the destination key names were lost in this dump; the names below follow the
            # usual HF Swin naming convention and should be treated as an assumption
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on

def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        # NOTE: the destination key names were lost in this dump; the q/k/v projection names
        # below follow the usual HF DETR-family naming and should be treated as an assumption
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
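

# Illustration (not part of the conversion): both readers above rely on the same
# convention, i.e. a fused in-projection of shape (3 * hidden, hidden) stores the
# query, key and value weights stacked along dim 0. A toy-sized sanity check of
# that slicing, using the torch import already present in this script:
def _demo_qkv_split(hidden=4):
    fused = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q, k, v = fused[:hidden], fused[hidden : 2 * hidden], fused[-hidden:]
    assert torch.equal(torch.cat([q, k, v]), fused)
    return q, k, v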

def prepare_img():
    # fetch the standard COCO test image used across HF conversion scripts
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im

@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # print the original state dict for inspection
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes (the exact destination keys were lost in this dump; the
    # replacements below follow the surrounding "transformer.* -> model.*" pattern
    # and should be treated as assumptions)
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
if model_name == "deta-swin-large":
_snake_case = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
_snake_case = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
_snake_case = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
_snake_case = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__lowercase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__lowercase ) , atol=1E-4 )
print('Everything ok!' )
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
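
# The file above follows the transformers lazy-import convention: a submodule is
# imported only when one of its symbols is first accessed. A simplified sketch of
# the idea (this is an illustration, not the real _LazyModule implementation):
#
#   import importlib
#   import types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           # map each public symbol to the submodule that defines it
#           self._symbol_to_module = {
#               symbol: module for module, symbols in import_structure.items() for symbol in symbols
#           }
#
#       def __getattr__(self, symbol):
#           # import the owning submodule on first access, then cache the symbol
#           module = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
#           value = getattr(module, symbol)
#           setattr(self, symbol, value)
#           return value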
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # collect ids that decode to clean ASCII text and round-trip through encode
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                continue
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
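
    # The expected ids above can be derived by hand: the Perceiver tokenizer is
    # byte-level, and (assuming the usual six special tokens, with [CLS] = 4 and
    # [SEP] = 5) every UTF-8 byte maps to byte_value + 6. A quick sketch:
    #
    #   text = "Unicode €."
    #   ids = [4] + [b + 6 for b in text.encode("utf-8")] + [5]
    #   # -> [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]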
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
    # The four overrides below intentionally disable base-class tests that do not
    # apply to a byte-level tokenizer without a vocabulary file (the original
    # method names were lost in this dump; the names used here follow the upstream
    # Perceiver test suite and should be treated as assumptions).
    def test_pretrained_model_lists(self):
        pass

    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
"""
Gradient descent for a linear hypothesis function y = w0 + w1*x1 + w2*x2 + w3*x3,
trained on the input/output pairs below.
"""
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the predicted and the actual output for an example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Evaluate w0 + w1*x1 + ... for the given input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output value for the example in the chosen data set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Predicted output value for the example in the chosen data set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of the error terms (weighted by the feature at `index`; -1 means bias)."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
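

# For comparison, the same batch update can be written in a few vectorized numpy
# lines. This is a sketch under the same linear model y = w0 + w . x, not a
# drop-in replacement for the loop-based trainer above:
def run_gradient_descent_vectorized(features, targets, lr=LEARNING_RATE, steps=10_000):
    x = numpy.asarray(features, dtype=float)  # shape (m, n)
    y = numpy.asarray(targets, dtype=float)  # shape (m,)
    x = numpy.hstack([numpy.ones((x.shape[0], 1)), x])  # prepend a bias column
    w = numpy.zeros(x.shape[1])
    for _ in range(steps):
        grad = x.T @ (x @ w - y) / len(y)  # gradient of mean squared error / 2
        w -= lr * grad
    return w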
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
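

# For reference, a minimal sketch of the algorithm under test, using the same
# adjacency format as above (node -> list of [neighbour, cost]); the real
# implementation lives in graphs.minimum_spanning_tree_prims:
def prim_mst_sketch(adjacency, start=0):
    import heapq

    visited = {start}
    heap = [(cost, start, neighbour) for neighbour, cost in adjacency[start]]
    heapq.heapify(heap)
    tree = []
    while heap:
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        tree.append([u, v, cost])
        for neighbour, c in adjacency[v]:
            if neighbour not in visited:
                heapq.heappush(heap, (c, v, neighbour))
    return tree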
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
lowerCamelCase_ = NewType('''DataClass''', Any)
lowerCamelCase_ = NewType('''DataClassType''', Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    # map each choice to its string form so argparse can turn string inputs back
    # into the original values
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
):
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class HfArgumentParser(ArgumentParser):
    """ArgumentParser that builds its command-line arguments from dataclass type hints."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
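

# A quick usage sketch (the dataclass and field names below are illustrative, not
# from this file): declare a dataclass, let HfArgumentParser turn its fields into
# CLI flags, then parse either argv-style strings or a plain dict.
#
#   @dataclasses.dataclass
#   class TrainArgs:
#       learning_rate: float = HfArg(default=3e-4, help="Peak learning rate")
#       fp16: bool = False
#
#   parser = HfArgumentParser(TrainArgs)
#   (train_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--fp16"])
#   (same_args,) = parser.parse_dict({"learning_rate": 1e-4, "fp16": True})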
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    # The class name was lost in this dump; "MidiProcessor" follows the diffusers
    # dummy-object convention for the transformers + torch + note_seq backends and
    # should be treated as an assumption. The method names below follow the
    # standard dummy-object pattern.
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
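

# Behaviour sketch: thanks to the DummyObject metaclass, touching this class
# without the required extras installed raises an informative error pointing at
# the missing backends, instead of the whole package failing to import, e.g.:
#
#   MidiProcessor()  # -> raises, telling the user to install transformers, torch and note_seq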
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowerCamelCase = 16
__lowerCamelCase = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
parser.add_argument(
"""--model_name_or_path""" , type=__UpperCAmelCase , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=__UpperCAmelCase , )
parser.add_argument(
"""--output_dir""" , type=__UpperCAmelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=__UpperCAmelCase , default=__UpperCAmelCase , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=__UpperCAmelCase , default=__UpperCAmelCase , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=__UpperCAmelCase , default=2 , help="""Number of train epochs.""" , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
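
# Typical invocation sketch: run the script through the accelerate CLI so the
# distributed / DeepSpeed plumbing is configured before it starts (the filename
# below is illustrative):
#
#   accelerate launch checkpointing_example.py --num_epochs 2 --output_dir ./ckpts
#   accelerate launch checkpointing_example.py --resume_from_checkpoint ./ckpts/epoch_1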
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until it precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
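

# Convergence sketch: each pass halves the bracket, so the loop needs roughly
# log2((b - a) / tol) iterations. For bisection(f, 1, 1000) below that is about
# ceil(log2(999 / 1e-7)) = 34 halvings before |start - mid| drops under 10**-7.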
if __name__ == "__main__":
    print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        # NOTE: the benchmark label strings below were lost in this dump; the names
        # used here follow the upstream datasets benchmark and are assumptions
        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = "swin2sr"
_UpperCAmelCase : Optional[int] = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[int] , lowercase : List[Any]=64 , lowercase : int=1 , lowercase : Union[str, Any]=3 , lowercase : Dict=180 , lowercase : List[Any]=[6, 6, 6, 6, 6, 6] , lowercase : Dict=[6, 6, 6, 6, 6, 6] , lowercase : List[Any]=8 , lowercase : List[str]=2.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : List[str]=False , lowercase : List[Any]=0.02 , lowercase : List[Any]=1E-5 , lowercase : Optional[int]=2 , lowercase : Tuple=1.0 , lowercase : List[Any]="1conv" , lowercase : List[Any]="pixelshuffle" , **lowercase : List[str] , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = embed_dim
_snake_case = depths
_snake_case = len(lowercase )
_snake_case = num_heads
_snake_case = window_size
_snake_case = mlp_ratio
_snake_case = qkv_bias
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = drop_path_rate
_snake_case = hidden_act
_snake_case = use_absolute_embeddings
_snake_case = layer_norm_eps
_snake_case = initializer_range
_snake_case = upscale
_snake_case = img_range
_snake_case = resi_connection
_snake_case = upsampler | 686 | 0 |
def actual_power(a: int, b: int):
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
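
# --- Illustrative checks (added; not in the original file): plain arithmetic
# sanity values for `power`.
if __name__ == "__main__":
    assert power(2, 3) == 8
    assert power(2, -3) == 0.125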
| 99 |
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
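
# --- Illustrative demo (added; not in the original file): a non-interactive
# run on a small hard-coded list, avoiding the stdin prompt in `main()`.
if __name__ == "__main__":
    data = [5, 1, 4, 2, 3]
    quick_sort_random(data, 0, len(data))
    assert data == [1, 2, 3, 4, 5]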
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
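
# --- Illustrative check (added; not in the original file): a non-interactive
# call with a small term count.
if __name__ == "__main__":
    assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]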
| 100 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
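
# --- Illustrative check (added; not in the original file): with a loose ratio
# of 0.5 the spiral stays tiny; the expected side length 11 was verified by
# tracing the loop by hand.
if __name__ == "__main__":
    assert solution(0.5) == 11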
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
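
# --- Illustrative usage sketch (added; not in the original file). Wrapped in a
# helper because this module uses relative imports and is not run directly; it
# only builds default config values, no weights are downloaded.
def _demo_git_config():
    config = GitConfig()
    return config.model_type, config.vision_config.hidden_size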
| 101 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
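
# --- Illustrative usage sketch (added; not in the original file). Wrapped in a
# helper because this module uses relative imports; it builds a default
# backbone config and reads the ONNX input spec.
def _demo_resnet_config():
    config = ResNetConfig(out_features=["stage2", "stage4"])
    onnx_config = ResNetOnnxConfig(config)
    return config.stage_names, dict(onnx_config.inputs)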
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """
    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
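
# --- Illustrative demo (added; not in the original file): the classic
# fractional-knapsack instance, matching the doctest above.
if __name__ == "__main__":
    print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0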
| 102 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
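
# --- Illustrative sketch (added; not in the original test file): stepping a
# DDPMScheduler directly. The random tensor stands in for a real denoising
# model's output, so the result is meaningless noise; it only shows the API.
def _demo_ddpm_steps():
    scheduler = DDPMScheduler(num_train_timesteps=1000)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps[:3]:  # just a few steps for illustration
        noise_pred = torch.randn_like(sample)  # stand-in for a model call
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample.shape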
| 103 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert_idx=expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, expert_idx=None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
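
# --- Illustrative check (added; not in the original file): there are 5
# distinct ways to climb a 4-step staircase taking 1 or 2 steps at a time.
if __name__ == "__main__":
    assert climb_stairs(4) == 5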
| 104 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
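
# --- Illustrative check (added; not in the original file): the sequence
# produced here starts 1, 1, 2, 5, 14, so the 5th entry is 14.
if __name__ == "__main__":
    assert catalan(5) == 14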
| 105 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False):
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
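
# --- Illustrative usage sketch (added; not in the original file). Wrapped in a
# helper because this module uses relative imports; the random waveform is a
# stand-in for one second of real 16 kHz speech.
def _demo_mctct_features():
    extractor = MCTCTFeatureExtractor()
    waveform = np.random.randn(16_000).astype(np.float32)
    features = extractor(waveform, sampling_rate=16_000, padding=True)
    return features["input_features"][0].shape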
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = LEDConfig
_UpperCAmelCase : int = {}
_UpperCAmelCase : List[str] = "gelu"
def __init__( self : Union[str, Any] , lowercase : Optional[int] , lowercase : Dict=13 , lowercase : Dict=7 , lowercase : Tuple=True , lowercase : Dict=False , lowercase : Dict=99 , lowercase : Any=32 , lowercase : List[Any]=2 , lowercase : List[str]=4 , lowercase : List[str]=37 , lowercase : Dict=0.1 , lowercase : int=0.1 , lowercase : List[Any]=20 , lowercase : int=2 , lowercase : Optional[Any]=1 , lowercase : List[str]=0 , lowercase : Optional[int]=4 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = eos_token_id
_snake_case = pad_token_id
_snake_case = bos_token_id
_snake_case = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case = prepare_led_inputs_dict(lowercase , lowercase , lowercase )
_snake_case = tf.concat(
[tf.zeros_like(lowercase )[:, :-1], tf.ones_like(lowercase )[:, -1:]] , axis=-1 , )
_snake_case = global_attention_mask
return config, inputs_dict
def A ( self : str , lowercase : str , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = TFLEDModel(config=lowercase ).get_decoder()
_snake_case = inputs_dict['input_ids']
_snake_case = input_ids[:1, :]
_snake_case = inputs_dict['attention_mask'][:1, :]
_snake_case = 1
# first forward pass
_snake_case = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
_snake_case , _snake_case = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case = model(lowercase , attention_mask=lowercase )[0]
_snake_case = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case = output_from_no_past[:, -3:, random_slice_idx]
_snake_case = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : str=None , ) -> Union[str, Any]:
if attention_mask is None:
_snake_case = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_snake_case = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_snake_case = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_UpperCAmelCase : Optional[int] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase : Tuple = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : str = True
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : str = False
_UpperCAmelCase : List[Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = TFLEDModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = tf.zeros_like(inputs_dict['attention_mask'] )
_snake_case = 2
_snake_case = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
_snake_case = True
_snake_case = self.model_tester.seq_length
_snake_case = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase : List[str] ):
_snake_case = outputs.decoder_attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase : List[str] ):
_snake_case = [t.numpy() for t in outputs.encoder_attentions]
_snake_case = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
_snake_case = len(lowercase )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
if self.is_encoder_decoder:
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_decoder_attentions_output(lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase ) )
self.assertEqual(model.config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def A ( self : List[Any] ):
'''simple docstring'''
pass
def A ( self : Any ):
'''simple docstring'''
pass
def _long_tensor(tok_lst : str ) -> Optional[Any]:
    return tf.constant(tok_lst , dtype=tf.int32 )


TOLERANCE = 1E-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_no_head( self : Optional[Any] ):
        '''simple docstring'''
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3 )

    def test_inference_with_head( self : str ):
        '''simple docstring'''
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3 , rtol=1E-3 )
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(r'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(r'''^\s*else:''')
def find_backend(line ):
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init(init_file ):
    with open(init_file , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith('_import_structure = {' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(r'\[([^\]]+)\]' , content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(' ' * 8 + '"' ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(', ' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(', ' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(' ' * 8 + '"' ):
                    objects.append(line[9:-3] )
                elif line.startswith(' ' * 12 + '"' ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('else' )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', ' ) )
        elif line.startswith(' ' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects , type_hint_objects ):
    def find_duplicates(seq ):
        return [k for k, v in collections.Counter(seq ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = 'base imports' if key == 'none' else F'{key} backend'
            errors.append(F'Differences for {name}:' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F'  {a} in TYPE_HINT but not in _import_structure.' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F'  {a} in _import_structure but not in TYPE_HINT.' )
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , '__init__.py' )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append('\n'.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('\n\n'.join(failures ) )
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('*.py' ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , '.' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
            if len(submodule.split('.' ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        'transformers' , os.path.join(PATH_TO_TRANSFORMERS , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = '\n'.join(F'- {module}' for module in module_not_registered )
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            F'{list_of_modules}\n'
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
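# Quick standalone sanity check of find_backend (the guard strings below are
# illustrative, not taken from any real init file). Note the empty () group in
# _re_backend: findall therefore returns (backend, "") tuples, which is why
# find_backend indexes b[0].
assert find_backend("    if not is_torch_available():") == "torch"
assert find_backend("else:") is None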
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
ZERO2 = '''zero2'''
ZERO3 = '''zero3'''
stages = [ZERO2, ZERO3]
def a_ ( func : Union[str, Any] , param_num : Union[str, Any] , param : Tuple ) -> Dict:
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x ) for x in param.args ) )
    return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( TestCasePlus ):
'''simple docstring'''
    @parameterized.expand(params , name_func=a_ )
    def test_fp32_non_distributed( self : List[str] , stage : List[Any] , model : Dict ):
        '''simple docstring'''
        self.run_and_check(
            stage=stage , model=model , distributed=False , fpaa=False , )

    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=a_ )
    def test_fp32_distributed( self : Any , stage : str , model : List[str] ):
        '''simple docstring'''
        self.run_and_check(
            stage=stage , model=model , distributed=True , fpaa=False , )

    @parameterized.expand(params , name_func=a_ )
    def test_fp16_non_distributed( self : List[str] , stage : Optional[Any] , model : Optional[int] ):
        '''simple docstring'''
        self.run_and_check(
            stage=stage , model=model , distributed=False , fpaa=True , )

    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=a_ )
    def test_fp16_distributed( self : Optional[int] , stage : Union[str, Any] , model : Union[str, Any] ):
        '''simple docstring'''
        self.run_and_check(
            stage=stage , model=model , distributed=True , fpaa=True , )
    def do_checks( self : List[str] , output_dir : Optional[Any] ):
        '''simple docstring'''
        pass
    def run_and_check( self : str , stage : str , model : str , eval_steps : int = 10 , distributed : bool = True , fpaa : bool = True , quality_checks : bool = True , ):
        '''simple docstring'''
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage , model_name=model_name , eval_steps=eval_steps , num_train_epochs=1 , distributed=distributed , fpaa=fpaa , )
        self.do_checks(output_dir )
        return output_dir
    def run_trainer( self : Any , stage : str , model_name : str , eval_steps : int = 10 , num_train_epochs : int = 1 , distributed : bool = True , fpaa : bool = True , ):
        '''simple docstring'''
        output_dir = self.get_auto_remove_tmp_dir('./xxx' , after=False )
        args = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
        ds_args = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
        script = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
        launcher = self.get_launcher(distributed )
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd , env=self.get_env() )
return output_dir
    def get_launcher( self : List[str] , distributed : Any=False ):
        '''simple docstring'''
        num_gpus = min(2 , get_gpu_count() ) if distributed else 1
        return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
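# Illustration only: with two GPUs available and distributed=True, get_launcher
# above yields this argv prefix (the GPU count is a hypothetical example value
# of min(2, get_gpu_count())).
_EXAMPLE_LAUNCHER = "deepspeed --num_nodes 1 --num_gpus 2".split()
assert _EXAMPLE_LAUNCHER == ["deepspeed", "--num_nodes", "1", "--num_gpus", "2"]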
import numpy as np
from transformers import Pipeline
def softmax(outputs ) -> Union[str, Any]:
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class PairClassificationPipeline(Pipeline ):
    '''simple docstring'''

    def _sanitize_parameters( self : List[Any] , **kwargs : Any ) -> Any:
        """simple docstring"""
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["""second_text"""] = kwargs["""second_text"""]
        return preprocess_kwargs, {}, {}

    def preprocess( self : Union[str, Any] , text : Tuple , second_text : Any=None ) -> Dict:
        """simple docstring"""
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )

    def _forward( self : Union[str, Any] , model_inputs : Any ) -> Dict:
        """simple docstring"""
        return self.model(**model_inputs )

    def postprocess( self : List[str] , model_outputs : List[Any] ) -> int:
        """simple docstring"""
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
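# Minimal usage sketch. The checkpoint name below is illustrative -- any
# sequence-classification model with an id2label mapping would work.
if __name__ == "__main__":
    from transformers import AutoModelForSequenceClassification, AutoTokenizer

    checkpoint = "sgugger/finetuned-bert-mrpc"  # hypothetical example checkpoint
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    pipe = PairClassificationPipeline(model=model, tokenizer=tokenizer)
    print(pipe("I like you", second_text="I love you"))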
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ernie'''] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
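# Simplified, illustrative sketch of what _LazyModule does (a hypothetical
# re-implementation, not the real transformers class): attribute access
# triggers the submodule import instead of doing it eagerly at import time.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for module_name, attrs in self._import_structure.items():
            if attr in attrs:
                module = importlib.import_module("." + module_name, self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")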
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder ):
    def _info( self : str ):
        '''simple docstring'''
        return datasets.DatasetInfo(
            features=datasets.Features({"""content""": datasets.Value("""string""" )} ) ,supervised_keys=None ,)

    def _split_generators( self : Dict ,dl_manager : Optional[Any] ,pipeline : Union[str, Any] ):
        '''simple docstring'''
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"""examples""": get_test_dummy_examples()} )]

    def _build_pcollection( self : List[str] ,pipeline : Union[str, Any] ,examples : Tuple ):
        '''simple docstring'''
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )
class NestedBeamDataset(datasets.BeamBasedBuilder ):
    def _info( self : List[Any] ):
        '''simple docstring'''
        return datasets.DatasetInfo(
            features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) ,supervised_keys=None ,)

    def _split_generators( self : Union[str, Any] ,dl_manager : Optional[int] ,pipeline : Dict ):
        '''simple docstring'''
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"""examples""": get_test_nested_examples()} )
        ]

    def _build_pcollection( self : List[str] ,pipeline : List[str] ,examples : Optional[Any] ):
        '''simple docstring'''
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples() -> Dict:
    '''simple docstring'''
    return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]


def get_test_nested_examples() -> int:
    '''simple docstring'''
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
class BeamBuilderTest(TestCase ):
    @require_beam
    def test_download_and_prepare( self : Dict ):
        '''simple docstring'''
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir ,beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir ,builder.name ,"""default""" ,"""0.0.0""" ,f"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features ,datasets.Features({"""content""": datasets.Value("""string""" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows ,expected_num_examples )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples ,expected_num_examples )
            self.assertDictEqual(dset["""train"""][0] ,get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] ,get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir ,builder.name ,"""default""" ,"""0.0.0""" ,"""dataset_info.json""" ) ) )
            del dset
    @require_beam
    def test_download_and_prepare_sharded( self : int ):
        '''simple docstring'''
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir ,beam_runner="""DirectRunner""" )
            with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet ,num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir ,builder.name ,"""default""" ,"""0.0.0""" ,f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir ,builder.name ,"""default""" ,"""0.0.0""" ,f"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
            self.assertDictEqual(builder.info.features ,datasets.Features({"""content""": datasets.Value("""string""" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows ,expected_num_examples )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples ,expected_num_examples )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["""train"""]["""content"""] ) ,sorted(["""foo""", """bar""", """foobar"""] ) )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir ,builder.name ,"""default""" ,"""0.0.0""" ,"""dataset_info.json""" ) ) )
            del dset
    @require_beam
    def test_no_beam_options( self : Any ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            self.assertRaises(datasets.builder.MissingBeamOptions ,builder.download_and_prepare )
    @require_beam
    def test_nested_features( self : Optional[int] ):
        '''simple docstring'''
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir ,beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir ,builder.name ,"""default""" ,"""0.0.0""" ,f"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features ,datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows ,expected_num_examples )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples ,expected_num_examples )
            self.assertDictEqual(dset["""train"""][0] ,get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] ,get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir ,builder.name ,"""default""" ,"""0.0.0""" ,"""dataset_info.json""" ) ) )
            del dset
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n : int , prec : int=1_000 ) -> bool:
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1=d*(2**exp)

    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input('''Enter bound : ''').strip()))
    print('''Here\'s the list of primes:''')
    print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
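# Worked check of the decomposition used in is_prime_big (runnable
# standalone): for odd n, n - 1 factors as d * 2**exp with d odd.
_n = 97
_d, _exp = _n - 1, 0
while _d % 2 == 0:
    _d //= 2
    _exp += 1
assert (_d, _exp) == (3, 5) and _d * 2 ** _exp == _n - 1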
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester :
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , ) -> Optional[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ) -> int:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> Any:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ) -> Any:
        model = TFViTModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

        # Test with an image of a different size than the one specified in the config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> Optional[Any]:
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # Test with an image of a different size than the one specified in the config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp( self ) -> Dict:
        self.model_tester = TFViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ) -> int:
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViT does not use inputs_embeds""" )
    def test_inputs_embeds( self ) -> Any:
        pass

    @unittest.skip(reason="""ViT does not use inputs_embeds""" )
    def test_graph_mode_with_inputs_embeds( self ) -> Tuple:
        pass

    def test_model_common_attributes( self ) -> Union[str, Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )

    def test_forward_signature( self ) -> Any:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ) -> List[Any]:
        model = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" )
        self.assertIsNotNone(model )
def prepare_img() -> List[str]:
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def default_image_processor( self ) -> Any:
        return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ) -> List[str]:
        model = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )

        # forward pass
        outputs = model(**inputs )

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836] )

        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
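# Standalone check of the ViT sequence-length arithmetic the tester above
# relies on (standard ViT-Base numbers, shown purely for illustration):
_image_size, _patch_size = 224, 16
_num_patches = (_image_size // _patch_size) ** 2  # 196 patches
assert _num_patches + 1 == 197  # +1 for the [CLS] token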
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'''\s+''')


def get_hash(example : List[Any] ) -> int:
    return {"hash": hashlib.md5(re.sub(PATTERN , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def line_stats(example : List[Any] ) -> Dict:
    line_lengths = [len(line ) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats(example : Optional[int] ) -> List[str]:
    alpha_frac = np.mean([c.isalnum() for c in example['content']] )
    return {"alpha_frac": alpha_frac}
def check_uniques(example : List[Any] , uniques : Optional[Any] ) -> Optional[int]:
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def is_autogenerated(example : Union[str, Any] , scan_width : int=5 ) -> Optional[Any]:
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def is_config_or_test(example : List[Any] , scan_width : int=5 , coeff : float=0.05 ) -> Union[str, Any]:
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n' )
    threshold = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords(example : Union[str, Any] ) -> Any:
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments(example : Tuple , minimum : int=4 ) -> List[str]:
    lines = example['content'].splitlines()
    counter = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio(example : Dict ) -> Dict:
    input_ids = tokenizer(example['content'] , truncation=False )['input_ids']
    ratio = len(example['content'] ) / len(input_ids )
return {"ratio": ratio}
def preprocess(example : Optional[Any] ) -> Any:
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def filter(example : Optional[int] , uniques : str , args : List[Any] ) -> int:
    if not check_uniques(example , uniques ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path : Dict ) -> Dict:
    with open(file_path , 'rb' ) as f_in:
        with gzip.open(str(file_path ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')

# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
    print(F'Size of deduplicated dataset: {len(ds_filter)}')

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F'file-{file_number+1:012}.json')
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}')
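# Standalone re-check of the autogeneration heuristic on toy content
# (a simplified illustrative re-implementation, separate from the pipeline):
def _looks_autogenerated(content, scan_width=5):
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    return any(k in line.lower() for line in content.splitlines()[:scan_width] for k in keywords)


assert _looks_autogenerated('# This file is auto-generated\nx = 1\n')
assert not _looks_autogenerated('def f():\n    return 1\n')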
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ):
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token( self , value ):
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory :str , filename_prefix :Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a :List[int] , token_ids_b :Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
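# For intuition: the single/pair layouts encoded by the two methods above,
# sketched with RoBERTa's standard special ids <s>=0 and </s>=2 (the other
# token ids are arbitrary illustrative values).
_bos, _eos = 0, 2
_tokens_a, _tokens_b = [31414, 232], [1437]
assert [_bos] + _tokens_a + [_eos] == [0, 31414, 232, 2]
assert [_bos] + _tokens_a + [_eos, _eos] + _tokens_b + [_eos] == [0, 31414, 232, 2, 2, 1437, 2]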
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig ):
    '''simple docstring'''

    model_type = "yolos"

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig ):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1E-4

    @property
    def default_onnx_opset( self ):
        '''simple docstring'''
        return 12
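# Quick smoke test of the defaults above (illustrative; requires transformers
# for the PretrainedConfig base class):
if __name__ == "__main__":
    _config = YolosConfig()
    assert _config.model_type == "yolos"
    assert _config.image_size == [512, 864] and _config.num_detection_tokens == 100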
def move_tower(height , from_pole , to_pole , with_pole ):
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )


def move_disk(fp , tp ):
    print('''moving disk from''' , fp , '''to''' , tp )


def main():
    height = int(input('''Height of hanoi: ''' ).strip() )
    move_tower(height , '''A''' , '''B''' , '''C''' )


if __name__ == "__main__":
    main()
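# Sanity note (runnable standalone): solving an n-disk tower takes exactly
# 2**n - 1 disk moves, so the printed trace from main() grows exponentially.
def _count_moves(height):
    return 2 ** height - 1 if height >= 1 else 0


assert _count_moves(3) == 7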
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = '''ResNetConfig'''

# Base docstring
_CHECKPOINT_FOR_DOC = '''microsoft/resnet-50'''
_EXPECTED_OUTPUT_SHAPE = [1, 2_048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''microsoft/resnet-50'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tiger cat'''

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    '''microsoft/resnet-50''',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module ):
    '''simple docstring'''

    def __init__( self , in_channels : int , out_channels : int , kernel_size : int = 3 , stride : int = 1 , activation : str = "relu" ):
        '''simple docstring'''
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward( self , input : Tensor ):
        '''simple docstring'''
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
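# Illustrative shape check for ResNetConvLayer (requires torch): stride 2
# with padding = kernel_size // 2 halves the spatial dimensions.
if __name__ == "__main__":
    import torch

    _layer = ResNetConvLayer(3, 64, kernel_size=7, stride=2)
    _out = _layer(torch.randn(1, 3, 224, 224))
    assert _out.shape == (1, 64, 112, 112)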
class ResNetEmbeddings(nn.Module ):
    '''simple docstring'''

    def __init__( self , config : ResNetConfig ):
        '''simple docstring'''
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
        self.pooler = nn.MaxPool2d(kernel_size=3 , stride=2 , padding=1 )
        self.num_channels = config.num_channels

    def forward( self , pixel_values : Tensor ):
        '''simple docstring'''
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        embedding = self.embedder(pixel_values )
        embedding = self.pooler(embedding )
        return embedding
class ResNetShortCut(nn.Module ):
    '''simple docstring'''

    def __init__( self , in_channels : int , out_channels : int , stride : int = 2 ):
        '''simple docstring'''
        super().__init__()
        self.convolution = nn.Conv2d(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )

    def forward( self , input : Tensor ):
        '''simple docstring'''
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        return hidden_state
class ResNetBasicLayer(nn.Module ):
    '''simple docstring'''

    def __init__( self , in_channels : int , out_channels : int , stride : int = 1 , activation : str = "relu" ):
        '''simple docstring'''
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , out_channels , stride=stride ) , ResNetConvLayer(out_channels , out_channels , activation=None ) , )
        self.activation = ACT2FN[activation]

    def forward( self , hidden_state ):
        '''simple docstring'''
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class ResNetBottleNeckLayer(nn.Module ):
    '''simple docstring'''

    def __init__( self , in_channels : int , out_channels : int , stride : int = 1 , activation : str = "relu" , reduction : int = 4 ):
        '''simple docstring'''
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , reduces_channels , kernel_size=1 ) , ResNetConvLayer(reduces_channels , reduces_channels , stride=stride ) , ResNetConvLayer(reduces_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACT2FN[activation]

    def forward( self , hidden_state ):
        '''simple docstring'''
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class ResNetStage(nn.Module ):
    '''simple docstring'''

    def __init__( self , config : ResNetConfig , in_channels : int , out_channels : int , stride : int = 2 , depth : int = 2 , ):
        '''simple docstring'''
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels , out_channels , stride=stride , activation=config.hidden_act ) , *[layer(out_channels , out_channels , activation=config.hidden_act ) for _ in range(depth - 1 )] , )

    def forward( self , input : Tensor ):
        '''simple docstring'''
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
RESNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
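# --- Illustrative example (not part of the modeling file above) --------------
# A minimal, self-contained sketch of the residual pattern the basic and
# bottleneck layers implement: output = activation(layer(x) + shortcut(x)).
# The class name and shapes below are hypothetical and exist only for this demo.
import torch
from torch import nn


class MiniResidualBlock(nn.Module):
    def __init__(self, channels: int):
        super().__init__()
        # a single conv + norm stands in for the deeper `self.layer` stacks above
        self.layer = nn.Sequential(nn.Conv2d(channels, channels, kernel_size=3, padding=1), nn.BatchNorm2d(channels))
        self.activation = nn.ReLU()

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        residual = hidden_state  # identity shortcut; a 1x1 conv would replace it when shapes change
        hidden_state = self.layer(hidden_state)
        return self.activation(hidden_state + residual)


# quick shape check: (batch, channels, height, width) is preserved
assert MiniResidualBlock(8)(torch.randn(2, 8, 16, 16)).shape == (2, 8, 16, 16)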
def topological_sort(graph):
    """Kahn's algorithm: repeatedly pop zero-indegree vertices; if some vertex
    is never popped, the graph contains a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print('Cycle exists')
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
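# Hedged usage note: the graph below is illustrative. With the cycle
# 0 -> 1 -> 2 -> 0 no vertex ever reaches indegree zero, so the function
# reports the cycle instead of printing an ordering.
cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)  # prints: Cycle exists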
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
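# --- Illustrative example (not the actual transformers `_LazyModule`) --------
# A minimal sketch of the lazy-import idea used above: a PEP 562 module-level
# __getattr__ resolves heavy submodule imports only on first attribute access.
# `_demo_import_structure` and the submodule name are assumptions for the demo.
import importlib

_demo_import_structure = {"configuration_focalnet": ["FocalNetConfig"]}


def __getattr__(name):
    for module_name, symbols in _demo_import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")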
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal leaf value for the player to move in a full binary
    game tree whose leaf values are `scores`; maximizing and minimizing levels
    alternate."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
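# Worked example for minimax (scores hypothetical): with scores [3, 5, 2, 9]
# and height = log2(4) = 2, the maximizer gets max(min(3, 5), min(2, 9)) = 3.
assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3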
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=['stage2', 'stage3', 'stage4'],
    )
    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2_048,
        decoder_ffn_dim=2_048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )
    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''')
            in_proj_bias = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim:, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
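def _demo_qkv_split():
    # Illustrative only (dims hypothetical): the row-wise slicing that
    # read_in_swin_q_k_v / read_in_decoder_q_k_v apply to a fused (3*dim, dim)
    # in-projection matrix to recover separate query, key and value weights.
    dim = 4
    in_proj_weight = torch.randn(3 * dim, dim)
    query = in_proj_weight[:dim, :]
    key = in_proj_weight[dim : dim * 2, :]
    value = in_proj_weight[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)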
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints', filename='adet_swin_ft.pth')
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365', filename='deta_swin_pt_o365.pth')
    else:
        raise ValueError(f'''Model name {model_name} not supported''')
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format='coco_detection')
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device))
    # verify logits
    print('Logits:', outputs.logits[0, :3, :3])
    print('Boxes:', outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print('Everything ok!')
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print('Pushing model and processor to hub...')
        model.push_to_hub(f'''jozhang97/{model_name}''')
        processor.push_to_hub(f'''jozhang97/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
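def _demo_rename_key_pattern():
    # Illustrative only (keys hypothetical): the pop/re-insert pattern that
    # rename_key() applies once per (src, dest) pair produced above.
    state = {"old.key": 1}
    for src, dest in [("old.key", "new.key")]:
        state[dest] = state.pop(src)
    assert state == {"new.key": 1}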
'''simple docstring'''
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True if 'number' is prime, otherwise False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: return all prime numbers from 2 up to n."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
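# Quick illustrative check for the sieve above: the primes up to 20.
assert sieve_er(20) == [2, 3, 5, 7, 11, 13, 17, 19]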
def get_prime_numbers(n):
    """Return a list of all prime numbers from 2 up to n."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number):
    """Return the prime factorization of 'number' as a list of prime factors."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number):
    """Return the greatest prime factor of 'number' (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number):
    """Return the smallest prime factor of 'number' (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number):
    """Return True if 'number' is even, otherwise False."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
    return number % 2 == 0


def is_odd(number):
    """Return True if 'number' is odd, otherwise False."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"
    return number % 2 != 0


def goldbach(number):
    """Goldbach's assumption: return two prime numbers whose sum equals the even input."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1, number2):
    """Greatest common divisor of two non-negative integers, via the iterative
    Euclidean algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
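# Illustrative trace for gcd(...) above: Euclid's algorithm maps (a, b) to
# (b, a % b) until b is 0, e.g. (24, 36) -> (36, 24) -> (24, 12) -> (12, 0) -> 12.
assert gcd(24, 36) == 12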
def kg_v(number1, number2):
    """Least common multiple of two positive integers (German: kgV)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n):
    """Return the n-th prime number (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1, p_number_2):
    """Return all prime numbers strictly between the two prime arguments."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    """Return all divisors of n (inclusive 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number):
    """Return True if the sum of the proper divisors of 'number' equals 'number'."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """Return the given fraction reduced by the gcd of its terms."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    """Return n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    """Iterative Fibonacci: returns successive values 1, 2, 3, 5, 8, ... for n = 1, 2, 3, ..."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
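# Illustrative values for fib(...) above, obtained by tracing the loop: with
# this indexing fib(1) == 1, fib(2) == 2, fib(3) == 3, fib(4) == 5, fib(5) == 8.
assert [fib(i) for i in range(1, 6)] == [1, 2, 3, 5, 8]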
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def A ( self : Optional[int] , lowercase : Tuple , lowercase : Optional[Any]=False , lowercase : int=20 , lowercase : Optional[int]=5 ):
'''simple docstring'''
_snake_case = []
for i in range(len(lowercase ) ):
try:
_snake_case = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_snake_case = list(filter(lambda lowercase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , lowercase ) )
_snake_case = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
_snake_case = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
_snake_case = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case = [t[0] for t in toks]
# Ensure consistency
_snake_case = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
_snake_case = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
_snake_case = ' ' + output_txt
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = 'Unicode €.'
_snake_case = tokenizer(lowercase )
_snake_case = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]Unicode €.[SEP]' )
_snake_case = tokenizer('e è é ê ë' )
_snake_case = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_snake_case = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
self.assertIsInstance(lowercase , lowercase )
if FRAMEWORK != "jax":
_snake_case = list(batch.input_ids.numpy()[0] )
else:
_snake_case = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowercase )
self.assertIn('attention_mask' , lowercase )
self.assertNotIn('decoder_input_ids' , lowercase )
self.assertNotIn('decoder_attention_mask' , lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = [
'Summary of the text.',
'Another summary.',
]
_snake_case = tokenizer(
text_target=lowercase , max_length=32 , padding='max_length' , truncation=lowercase , return_tensors=lowercase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
_snake_case = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_snake_case = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_snake_case = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
_snake_case = [f'''<extra_id_{i}>''' for i in range(125 )]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case = tokenizer_class.from_pretrained(
lowercase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowercase )]
_snake_case = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : List[str] ):
'''simple docstring'''
pass
def A ( self : Dict ):
'''simple docstring'''
pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
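# --- Illustrative example (not the tokenizer's actual implementation) --------
# A minimal sketch of the byte-level scheme the expected ids above encode:
# ids are UTF-8 bytes shifted past the special tokens. The offset of 6 and the
# [CLS]=4 / [SEP]=5 ids are read off the test vector for 'Unicode €.' above.
def _byte_encode_demo(text):
    cls_id, sep_id, offset = 4, 5, 6
    return [cls_id] + [b + offset for b in text.encode("utf-8")] + [sep_id]


assert _byte_encode_demo("Unicode €.") == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]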
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
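def _demo_random_subsample():
    # Illustrative usage of random_subsample (values hypothetical): crop a
    # 3-second, 16 kHz waveform down to exactly 1 second. Not called anywhere;
    # kept only as a self-contained demo.
    wav = np.zeros(3 * 16_000)
    crop = random_subsample(wav, max_length=1.0, sample_rate=16_000)
    assert len(crop) == 16_000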
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                'The argument `--freeze_feature_extractor` is deprecated and '
                'will be removed in a future version. Use `--freeze_feature_encoder`'
                'instead. Setting `freeze_feature_encoder==True`.',
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                'The argument `--freeze_feature_extractor` is deprecated and '
                'should not be used in combination with `--freeze_feature_encoder`.'
                'Only make use of `--freeze_feature_encoder`.'
            )
def main():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_audio_classification', model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets['train'] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets['eval'] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. """
'Make sure to set `--audio_column_name` to the correct audio column - one of '
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. """
'Make sure to set `--label_column_name` to the correct text column - one of '
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch ):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch['labels'] = list(batch[data_args.label_column_name] )
        return output_batch
    def val_transforms(batch ):
        wavs = [audio['array'] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch['labels'] = list(batch[data_args.label_column_name] )
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets['train'].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        predictions = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=predictions , references=eval_pred.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
            raw_datasets['train'] = (
raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__lowercase , output_all_columns=__lowercase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
            raw_datasets['eval'] = (
raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__lowercase , output_all_columns=__lowercase )
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
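# NOTE (added): `random_subsample` is used by `train_transforms` above but is never
# defined or imported in this snippet. A minimal sketch of what it could look like;
# the 16 kHz default and the exact helper name are assumptions, not taken from this
# file:
#
#     from random import randint
#
#     def random_subsample(wav, max_length, sample_rate=16_000):
#         """Randomly sample a chunk of `max_length` seconds from the waveform."""
#         sample_length = int(round(sample_rate * max_length))
#         if len(wav) <= sample_length:
#             return wav
#         random_offset = randint(0, len(wav) - sample_length - 1)
#         return wav[random_offset : random_offset + sample_length]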
| 259 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost] )
        adjacency[nodeb].append([nodea, cost] )
    result = mst(adjacency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
assert edge in result or reverse in result | 686 | 0 |
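# Added sanity check: the expected MST above has total weight
# 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37. A small helper to verify that, assuming the
# same [node, node, cost] triples the assertions compare against:
def total_mst_weight(mst_edges ):
    return sum(cost for _, _, cost in mst_edges )

assert total_mst_weight(
    [[7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9]] ) == 37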
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency( inductance : float , capacitance : float ):
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative')
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative')
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
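    # Added worked example: a 10 mH inductor with a 5 uF capacitor resonates at
    # 1 / (2 * pi * sqrt(L * C)) ~= 711.76 Hz.
    print(resonant_frequency(0.01 , 5e-6 ))  # ('Resonant frequency', 711.76...)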
| 320 |
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["transformers", "torch", "note_seq"]
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['transformers', 'torch', 'note_seq'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
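# Design note (added): the DummyObject metaclass raises a clear ImportError on any
# attribute access, and the stubbed __init__/from_config/from_pretrained cover the
# usual entry points, so importing the class always succeeds while actually using
# it without `transformers`, `torch` and `note_seq` installed fails loudly.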
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester :
    def __init__( self , parent , vocab_size=99 , batch_size=13 , d_model=16 , decoder_seq_length=7 , is_training=True , is_decoder=True , use_attention_mask=True , use_cache=False , use_labels=True , decoder_start_token_id=2 , decoder_ffn_dim=32 , decoder_layers=4 , decoder_attention_heads=4 , max_position_embeddings=30 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , scope=None , ):
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        config = TrOCRConfig(
            vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past( self , config , input_ids , attention_mask , lm_labels , ):
        '''simple docstring'''
        config.use_cache = True
        model = TrOCRDecoder(config=config ).to(torch_device ).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        past_key_values = outputs['past_key_values']
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )['last_hidden_state']
        output_from_past = model(next_tokens , past_key_values=past_key_values )['last_hidden_state']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest( ModelTesterMixin ,GenerationTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass
    def test_save_load_fast_init_from_base( self ):
        '''simple docstring'''
        pass
    def test_save_load_fast_init_to_base( self ):
        '''simple docstring'''
        pass
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_decoder_model_past( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs )
    def test_retain_grad_hidden_states_attentions( self ):
        '''simple docstring'''
        return
    @unittest.skip("""The model doesn't support left padding""" )  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility( self ):
        '''simple docstring'''
        pass
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout() -> None:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error() -> None:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def test_offline_with_datasets_offline_mode_enabled() -> None:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head('https://huggingface.co' ) | 686 | 0 |
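# `offline` above simulates the failure modes named in OfflineSimulationMode. A rough
# sketch of just the HF_DATASETS_OFFLINE_SET_TO_1 case, assuming it simply patches
# the library's offline flag (the real helper also covers the timeout and
# connection-failure modes):
from contextlib import contextmanager
from unittest.mock import patch


@contextmanager
def offline_flag_only():
    # Flip the `datasets` offline switch for the duration of the block.
    with patch('datasets.config.HF_DATASETS_OFFLINE' , True ):
        yield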
'''simple docstring'''
from collections import defaultdict
def dfs( start ):
    """Return the size of the subtree rooted at `start`, recording even cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree():
    """Count the edges that can be removed so every component has even size."""
    dfs(1 )
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
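    # Added note: for the tree above this prints 2. `dfs(1)` appends every node whose
    # subtree has an even number of vertices, including the root (the whole 10-node
    # tree), so `len(cuts) - 1` is the number of removable edges - here the edges
    # into nodes 3 and 6, leaving components of sizes 2, 4 and 4.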
| 565 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Rouge( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute( self , predictions , references , rouge_types=None , use_aggregator=True , use_stemmer=False ):
        '''simple docstring'''
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer )
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references , predictions ):
            score = scorer.score(ref , pred )
            if use_aggregator:
                aggregator.add_scores(score )
            else:
                scores.append(score )
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
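# Added usage sketch for the non-aggregated path through `_compute` above: with
# `use_aggregator=False` the result maps each rouge type to a list of per-pair
# Score tuples instead of a BootstrapAggregator summary.
#
#     rouge = datasets.load_metric('rouge')
#     results = rouge.compute(predictions=['hello there'], references=['hello there'],
#                             use_aggregator=False)
#     print(results['rouge1'][0].fmeasure)  # 1.0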
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['next_sentence_label'] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
        return inputs_dict
    class TFMobileBertModelTester(object ):
        def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , embedding_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs( self ):
            input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length] )
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
                token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
                choice_labels = ids_tensor([self.batch_size] , self.num_choices )
            config = MobileBertConfig(
                vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            model = TFMobileBertModel(config=config )
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs )
            inputs = [input_ids, input_mask]
            result = model(inputs )
            result = model(input_ids )
            self.parent.assertEqual(
                result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
            self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
        def create_and_check_mobilebert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            model = TFMobileBertForMaskedLM(config=config )
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        def create_and_check_mobilebert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            model = TFMobileBertForNextSentencePrediction(config=config )
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
        def create_and_check_mobilebert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            model = TFMobileBertForPreTraining(config=config )
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(
                result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
            self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
        def create_and_check_mobilebert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config )
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
        def create_and_check_mobilebert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config )
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
            inputs = {
                'input_ids': multiple_choice_inputs_ids,
                'attention_mask': multiple_choice_input_mask,
                'token_type_ids': multiple_choice_token_type_ids,
            }
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
        def create_and_check_mobilebert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config )
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
        def create_and_check_mobilebert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            model = TFMobileBertForQuestionAnswering(config=config )
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
            self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        def prepare_config_and_inputs_for_common( self ):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
            return config, inputs_dict
    def setUp( self ):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_mobilebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFMobileBertModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    @slow
    def test_inference_masked_lm( self ):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 3_0522]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-4.5_919_547, -9.248_295, -9.645_256],
                    [-6.7_306_175, -6.440_284, -6.6_052_837],
                    [-7.2_743_506, -6.7_847_915, -6.024_673],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
| 62 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''caidas/swin2sr-classicalsr-x2-64''': (
        '''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
    ),
}
class Swin2SRConfig( PretrainedConfig ):
'''simple docstring'''
_UpperCAmelCase : Dict = "swin2sr"
_UpperCAmelCase : Optional[int] = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self , image_size=64 , patch_size=1 , num_channels=3 , embed_dim=180 , depths=[6, 6, 6, 6, 6, 6] , num_heads=[6, 6, 6, 6, 6, 6] , window_size=8 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , upscale=2 , img_range=1.0 , resi_connection="1conv" , upsampler="pixelshuffle" , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
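# Added usage sketch: a typical round trip with the configuration defined above.
# `Swin2SRModel` is the companion model class published in transformers; the
# weights here are randomly initialized, which is why this stays a sketch.
#
#     from transformers import Swin2SRConfig, Swin2SRModel
#     config = Swin2SRConfig(upscale=4)
#     model = Swin2SRModel(config)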
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '''
Human: <<task>>
Assistant: '''
DEFAULT_PROMPTS_REPO = '''huggingface-tools/default-prompts'''
PROMPT_FILES = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase="run" ) -> str:
if prompt_or_repo_id is None:
snake_case : List[Any] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("""\\s""" ,__lowercase ) is not None:
return prompt_or_repo_id
snake_case : List[str] = cached_file(
__lowercase ,PROMPT_FILES[mode] ,repo_type="""dataset""" ,user_agent={"""agent""": agent_name} )
with open(__lowercase ,"""r""" ,encoding="""utf-8""" ) as f:
return f.read()
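# Added usage sketch ("my-agent" is a made-up agent name; fetching the default
# template requires network access):
if __name__ == "__main__":
    template = download_prompt(None ,"""my-agent""" ,mode="""run""" )
    print(template[:100] )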
| 587 |
import random
def partition( a , left_index , right_index ) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1
def quick_sort_random( a , left , right ) -> None:
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point
def main() -> None:
    user_input = input('Enter numbers separated by a comma:\n' ).strip()
    arr = [int(item ) for item in user_input.split(',' )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )
if __name__ == "__main__":
main() | 686 | 0 |
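# Added deterministic sanity check for the two functions above:
def _self_test() -> None:
    nums = [5, 1, 4, 2, 3]
    quick_sort_random(nums , 0 , len(nums ) )
    assert nums == [1, 2, 3, 4, 5]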
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ) -> int:
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
_lowerCAmelCase : Tuple = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ) -> ModelCheckpoint:
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f"val_{metric}" , mode="max" , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ) -> EarlyStopping:
    return EarlyStopping(
        monitor=f"val_{metric}" , mode="min" if "loss" in metric else "max" , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback( pl.Callback ):
"""simple docstring"""
    def on_batch_end( self , trainer :pl.Trainer , pl_module :pl.LightningModule ):
        '''simple docstring'''
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
@rank_zero_only
    def _write_logs( self , trainer :pl.Trainer , pl_module :pl.LightningModule , type_path :str , save_generations=True ):
        '''simple docstring'''
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****" )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , "a+" ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"] )
            generations_file.open("w+" ).write(content )
@rank_zero_only
    def on_train_start( self , trainer :pl.Trainer , pl_module :pl.LightningModule ):
        '''simple docstring'''
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
    def on_test_end( self , trainer :pl.Trainer , pl_module :pl.LightningModule ):
        '''simple docstring'''
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , "test" )
@rank_zero_only
    def on_validation_end( self , trainer :pl.Trainer , pl_module :pl.LightningModule ):
        '''simple docstring'''
        save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 454 |
import math
def is_prime( number : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__lowercase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution( ratio : float = 0.1 ) -> int:
    j = 3
    primes = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
        primes += is_prime(i )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 0 |
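# Added context: `solution` walks the diagonals of an odd-width number spiral
# (Project Euler 58). For side length j + 2 the three new non-square corner values
# are j*j + (j + 1), j*j + 2*(j + 1) and j*j + 3*(j + 1) - exactly the range used
# in the loop - while 2*j - 1 counts all diagonal values for side length j. The
# function returns the first side length at which the prime ratio drops below
# `ratio`.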
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester :
"""simple docstring"""
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , attention_window=4 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        inputs_dict = prepare_led_inputs_dict(config , input_ids , decoder_input_ids )
        global_attention_mask = tf.concat(
            [tf.zeros_like(inputs_dict['attention_mask'] )[:, :-1], tf.ones_like(inputs_dict['attention_mask'] )[:, -1:]] , axis=-1 , )
        inputs_dict['global_attention_mask'] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        """simple docstring"""
        model = TFLEDModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1e-3 )
def prepare_led_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFLEDModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LEDConfig )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_attention_outputs( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict['global_attention_mask'] = tf.zeros_like(inputs_dict['attention_mask'] )
        num_global_attn_indices = 2
        inputs_dict['global_attention_mask'] = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(outputs ):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
        def check_encoder_attentions_output(outputs ):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(global_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['use_cache'] = False
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
    @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
    def test_saved_model_creation( self ):
        """simple docstring"""
        pass
    def test_generate_with_headmasking( self ):
        """simple docstring"""
        pass
def _long_tensor( tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
    def test_inference_no_head( self ):
        """simple docstring"""
        model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
        # change to intended input here
        input_ids = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
        decoder_input_ids = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1_0_2_4, 7_6_8)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
UpperCamelCase__ : Optional[Any] = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
UpperCamelCase__ : Optional[Any] = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
UpperCamelCase__ : Any = prepare_led_inputs_dict(model.config , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = model(**__SCREAMING_SNAKE_CASE )[0]
UpperCamelCase__ : int = (1, 1_0_2_4, model.config.vocab_size)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
# change to expected output here
UpperCamelCase__ : List[Any] = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-3 , rtol=1e-3 )
| 285 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = "resnet"
_UpperCAmelCase : Any = ["basic", "bottleneck"]
def __init__( self : Union[str, Any] , lowercase : Dict=3 , lowercase : Any=64 , lowercase : Any=[256, 512, 1_024, 2_048] , lowercase : Dict=[3, 4, 6, 3] , lowercase : Any="bottleneck" , lowercase : Optional[Any]="relu" , lowercase : Dict=False , lowercase : str=None , lowercase : Tuple=None , **lowercase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
_snake_case = num_channels
_snake_case = embedding_size
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = layer_type
_snake_case = hidden_act
_snake_case = downsample_in_first_stage
_snake_case = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase ) + 1 )]
_snake_case , _snake_case = get_aligned_output_features_output_indices(
out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
@property
def A ( self : int ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return 1E-3 | 686 | 0 |
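For context on how a config pair like the one above is consumed downstream, here is a minimal sketch. Both class names in this row are masked by the dump; `ResNetConfig` and `ResNetOnnxConfig` are assumptions, while `inputs` and `atol_for_validation` follow the upstream `OnnxConfig` property API:

# minimal sketch, assuming the two masked classes above are ResNetConfig
# and ResNetOnnxConfig; not taken from the dump itself
config = ResNetConfig()                   # default resnet-50-style config
onnx_config = ResNetOnnxConfig(config)
print(onnx_config.inputs)                 # OrderedDict mapping "pixel_values" to its dynamic axes
print(onnx_config.atol_for_validation)    # 1e-3, matching the property above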
import math
def is_prime(number: int) -> bool:
    """Trial-division primality test: checks odd divisors up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the first prime above factor * value (searching downward when desc=True)."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
| 190 |
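A quick usage sketch of the two helpers above (values picked for illustration):

# assumes the is_prime/next_prime definitions above
assert is_prime(13) and not is_prime(14)
print(next_prime(14))  # 17: counts upward from 14 until a prime is found
print(next_prime(13))  # 17: 13 is already prime, so the search restarts from 14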
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2) | 686 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
lowerCAmelCase__ : str = "audio-spectrogram-transformer"
def __init__( self: Any , __lowerCAmelCase: int=768 , __lowerCAmelCase: str=12 , __lowerCAmelCase: Optional[Any]=12 , __lowerCAmelCase: Optional[int]=3_072 , __lowerCAmelCase: List[Any]="gelu" , __lowerCAmelCase: int=0.0 , __lowerCAmelCase: str=0.0 , __lowerCAmelCase: Optional[Any]=0.02 , __lowerCAmelCase: Dict=1E-12 , __lowerCAmelCase: List[Any]=16 , __lowerCAmelCase: Dict=True , __lowerCAmelCase: int=10 , __lowerCAmelCase: Optional[Any]=10 , __lowerCAmelCase: Union[str, Any]=1_024 , __lowerCAmelCase: Optional[Any]=128 , **__lowerCAmelCase: Dict , ) -> List[Any]:
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = patch_size
__UpperCAmelCase = qkv_bias
__UpperCAmelCase = frequency_stride
__UpperCAmelCase = time_stride
__UpperCAmelCase = max_length
__UpperCAmelCase = num_mel_bins
| 221 |
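The stride settings above determine how many 16x16 patches the spectrogram is cut into. A minimal sketch of that arithmetic, assuming the usual convolution formula (kernel = patch_size, strides = (frequency_stride, time_stride)); the helper below is illustrative, not part of the config class:

def ast_patch_grid(num_mel_bins=128, max_length=1024, patch_size=16, frequency_stride=10, time_stride=10):
    # floor((input - kernel) / stride) + 1 along each axis
    freq_dim = (num_mel_bins - patch_size) // frequency_stride + 1
    time_dim = (max_length - patch_size) // time_stride + 1
    return freq_dim, time_dim, freq_dim * time_dim

print(ast_patch_grid())  # (12, 101, 1212) with the defaults above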
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # note: the upstream script wrote `if "fc2" and "experts" not in key`, where the
        # left operand is a truthy constant; the intended membership test is used here
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path) | 686 | 0 |
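The size bookkeeping in shard_on_the_fly follows a simple rule: each tensor contributes numel() times its per-element byte width, and the index maps every parameter name to the shard file that holds it. A toy sketch (the tensors and file name are stand-ins, not the real checkpoint):

import torch
from transformers.modeling_utils import dtype_byte_size

state = {"w": torch.zeros(4, 4, dtype=torch.float32), "b": torch.zeros(4, dtype=torch.float16)}
total = sum(t.numel() * dtype_byte_size(t.dtype) for t in state.values())
print(total)  # 16*4 + 4*2 = 72 bytes
index = {"metadata": {"total_size": total},
         "weight_map": {k: "pytorch_model-00001-of-00001.bin" for k in state}}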
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's reference to its event loop and IO thread."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 212 |
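A short usage sketch for the helpers above (the bucket name is made up):

print(extract_path_from_uri("s3://my-bucket/datasets/train"))  # "my-bucket/datasets/train"
print(extract_path_from_uri("relative/path"))                  # unchanged: no scheme present

import fsspec
fs = fsspec.filesystem("file")
# expected False here; note that newer fsspec versions expose LocalFileSystem.protocol
# as a tuple, in which case the simple `!= "file"` comparison above misfires
print(is_remote_filesystem(fs))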
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) | 686 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel

do_only_config = False
do_only_weights = True
do_only_renaming = False

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    # swap the old top-level prefix for the new one, keeping the rest of the key
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 259 |
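The core move in the script above is renaming dictionary keys in place. A toy sketch of the same pattern, detached from the real diffusers config:

config = {"image_size": 64, "num_res_blocks": 2, "act_fn": "silu"}
renames = {"image_size": "sample_size", "num_res_blocks": "layers_per_block"}
for old, new in renames.items():
    if old in config:
        config[new] = config.pop(old)
print(config)  # {'act_fn': 'silu', 'sample_size': 64, 'layers_per_block': 2}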
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = "sequence-classification"
def __init__( self : Optional[int] , lowercase : Any ):
'''simple docstring'''
if type(lowercase ) == dict:
_snake_case = Namespace(**lowercase )
_snake_case = glue_output_modes[hparams.task]
_snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(lowercase , lowercase , self.mode )
def A ( self : Optional[Any] , **lowercase : Optional[Any] ):
'''simple docstring'''
return self.model(**lowercase )
def A ( self : Optional[Any] , lowercase : str , lowercase : Tuple ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case = outputs[0]
_snake_case = self.trainer.lr_schedulers[0]['scheduler']
_snake_case = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.hparams
_snake_case = processors[args.task]()
_snake_case = processor.get_labels()
for mode in ["train", "dev"]:
_snake_case = self._feature_file(lowercase )
if os.path.exists(lowercase ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , lowercase )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_snake_case = convert_examples_to_features(
lowercase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , lowercase )
torch.save(lowercase , lowercase )
def A ( self : Dict , lowercase : str , lowercase : int , lowercase : bool = False ):
'''simple docstring'''
_snake_case = 'dev' if mode == 'test' else mode
_snake_case = self._feature_file(lowercase )
logger.info('Loading features from cached file %s' , lowercase )
_snake_case = torch.load(lowercase )
_snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(lowercase , lowercase , lowercase , lowercase ) , batch_size=lowercase , shuffle=lowercase , )
def A ( self : str , lowercase : Optional[Any] , lowercase : str ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case , _snake_case = outputs[:2]
_snake_case = logits.detach().cpu().numpy()
_snake_case = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : int , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_snake_case = np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_snake_case = np.argmax(lowercase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_snake_case = np.squeeze(lowercase )
_snake_case = np.concatenate([x['target'] for x in outputs] , axis=0 )
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , lowercase , lowercase )}
_snake_case = dict(results.items() )
_snake_case = results
return ret, preds_list, out_label_list
def A ( self : int , lowercase : list ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : List[str] , lowercase : Any ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( lowercase : Tuple , lowercase : Any ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(lowercase , lowercase )
parser.add_argument(
'--max_seq_length' , default=128 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=lowercase , required=lowercase , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=lowercase , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def a_ ( ) -> Union[str, Any]:
_snake_case = argparse.ArgumentParser()
add_generic_args(__lowercase , os.getcwd() )
_snake_case = GLUETransformer.add_model_specific_args(__lowercase , os.getcwd() )
_snake_case = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_snake_case = os.path.join(
'./results' , f'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_snake_case = GLUETransformer(__lowercase )
_snake_case = generic_train(__lowercase , __lowercase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_snake_case = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=__lowercase ) )
_snake_case = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__lowercase )
if __name__ == "__main__":
main() | 686 | 0 |
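The prepare_data/get_dataloader pair above relies on a cache-or-compute pattern around torch.save/torch.load; a minimal standalone sketch (the path and feature objects are toy stand-ins):

import os
import torch

cache_path = "cached_train_features.pt"
if os.path.exists(cache_path):
    features = torch.load(cache_path)       # reuse features cached by an earlier run
else:
    features = [{"input_ids": [101, 102]}]  # stand-in for convert_examples_to_features(...)
    torch.save(features, cache_path)        # cache for the next run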
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of the file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if the file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is a config or test file: look for keywords in the first lines,
    then count occurrences of "config" and "test" relative to file length."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a Python file contains none of: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if the file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to avoid filling the cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):  # noqa: A001 - shadows the builtin, kept for ds.filter below
    """Filter the dataset with heuristics; config/test and keyword-free files are dropped probabilistically."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 320 |
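The exact-dedup step above works because check_uniques removes a hash from the set the first time it is seen, so only the first occurrence of each hash survives the filter. A toy sketch:

rows = [{"hash": "a"}, {"hash": "b"}, {"hash": "a"}]
uniques = {"a", "b"}
kept = [row for row in rows if check_uniques(row, uniques)]
print(len(kept))  # 2: the second "a" is dropped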
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = LEDConfig
_UpperCAmelCase : int = {}
_UpperCAmelCase : List[str] = "gelu"
def __init__( self : Union[str, Any] , lowercase : Optional[int] , lowercase : Dict=13 , lowercase : Dict=7 , lowercase : Tuple=True , lowercase : Dict=False , lowercase : Dict=99 , lowercase : Any=32 , lowercase : List[Any]=2 , lowercase : List[str]=4 , lowercase : List[str]=37 , lowercase : Dict=0.1 , lowercase : int=0.1 , lowercase : List[Any]=20 , lowercase : int=2 , lowercase : Optional[Any]=1 , lowercase : List[str]=0 , lowercase : Optional[int]=4 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = eos_token_id
_snake_case = pad_token_id
_snake_case = bos_token_id
_snake_case = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case = prepare_led_inputs_dict(lowercase , lowercase , lowercase )
_snake_case = tf.concat(
[tf.zeros_like(lowercase )[:, :-1], tf.ones_like(lowercase )[:, -1:]] , axis=-1 , )
_snake_case = global_attention_mask
return config, inputs_dict
def A ( self : str , lowercase : str , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = TFLEDModel(config=lowercase ).get_decoder()
_snake_case = inputs_dict['input_ids']
_snake_case = input_ids[:1, :]
_snake_case = inputs_dict['attention_mask'][:1, :]
_snake_case = 1
# first forward pass
_snake_case = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
_snake_case , _snake_case = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case = model(lowercase , attention_mask=lowercase )[0]
_snake_case = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case = output_from_no_past[:, -3:, random_slice_idx]
_snake_case = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_UpperCAmelCase : Optional[int] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase : Tuple = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : str = True
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : str = False
_UpperCAmelCase : List[Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = TFLEDModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = tf.zeros_like(inputs_dict['attention_mask'] )
_snake_case = 2
_snake_case = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
_snake_case = True
_snake_case = self.model_tester.seq_length
_snake_case = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase : List[str] ):
_snake_case = outputs.decoder_attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase : List[str] ):
_snake_case = [t.numpy() for t in outputs.encoder_attentions]
_snake_case = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
_snake_case = len(lowercase )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
if self.is_encoder_decoder:
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_decoder_attentions_output(lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase ) )
self.assertEqual(model.config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def A ( self : List[Any] ):
'''simple docstring'''
pass
def A ( self : Any ):
'''simple docstring'''
pass
def _long_tensor(tok_lst):
    # helper used by the integration tests below, which call it as `_long_tensor`
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 )
def A ( self : str ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 , rtol=1E-3 ) | 686 | 0 |
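The encoder_seq_length arithmetic in the tester above rounds the sequence length up to the next multiple of the attention window; a minimal sketch of that formula:

def padded_encoder_seq_length(seq_length, attention_window):
    return seq_length + (attention_window - seq_length % attention_window) % attention_window

print(padded_encoder_seq_length(7, 4))  # 8: padded up to the next multiple of 4
print(padded_encoder_seq_length(8, 4))  # 8: already a multiple, no padding added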
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental | 504 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_lowerCamelCase : Union[str, Any] = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
_lowerCamelCase : Optional[int] = '''zero2'''
_lowerCamelCase : List[Any] = '''zero3'''
_lowerCamelCase : Dict = [ZEROa, ZEROa]
def a_ ( __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Tuple ) -> Dict:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
_snake_case = parameterized.to_safe_name('_'.join(str(__lowercase ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
_lowerCamelCase : Dict = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : List[str] , lowercase : List[Any] , lowercase : Dict ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : Any , lowercase : str , lowercase : List[str] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : List[str] , lowercase : Optional[Any] , lowercase : Optional[int] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
def A ( self : List[str] , lowercase : Optional[Any] ):
'''simple docstring'''
pass
def A ( self : str , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : bool = True , lowercase : bool = True , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = models[model]
_snake_case = self.run_trainer(
stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
self.do_checks(lowercase )
return output_dir
def A ( self : Any , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : int = 1 , lowercase : bool = True , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
_snake_case = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_snake_case = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_snake_case = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_snake_case = self.get_launcher(lowercase )
_snake_case = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase , env=self.get_env() )
return output_dir
def A ( self : List[str] , lowercase : Any=False ):
'''simple docstring'''
_snake_case = min(2 , get_gpu_count() ) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split() | 686 | 0 |
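get_launcher above only builds the `deepspeed ...` prefix; the full command handed to execute_subprocess_async is the concatenation of launcher, script path, script args, and the --deepspeed config args. A sketch with illustrative values:

launcher = "deepspeed --num_nodes 1 --num_gpus 2".split()
script = ["examples/research_projects/wav2vec2/run_asr.py"]   # path is illustrative
script_args = ["--output_dir", "/tmp/out", "--fp16"]
ds_args = ["--deepspeed", "ds_config_wav2vec2_zero2.json"]    # config name is illustrative
cmd = launcher + script + script_args + ds_args
print(" ".join(cmd))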
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : Tuple = XLNetTokenizer
__lowercase : Dict = XLNetTokenizerFast
__lowercase : Optional[int] = True
__lowercase : int = True
def UpperCAmelCase_ ( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ : Optional[int] = XLNetTokenizer(__UpperCAmelCase ,keep_accents=__UpperCAmelCase )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self ) -> str:
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) ,token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) ,token )
def UpperCAmelCase_ ( self ) -> List[str]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] ,"""<unk>""" )
        self.assertEqual(vocab_keys[1] ,"""<s>""" )
        self.assertEqual(vocab_keys[-1] ,"""<eod>""" )
        self.assertEqual(len(vocab_keys ) ,1006 )
def UpperCAmelCase_ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size ,1000 )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB ,keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) ,[285, 46, 10, 170, 382] )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids ,[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
def UpperCAmelCase_ ( self ) -> Dict:
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB ,do_lower_case=True )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens ,[
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB ,do_lower_case=False )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
        tokenizer = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
        text = tokenizer.encode("""sequence builders""" ,add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text ,text_a )
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : int = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
| 565 |
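# --- Hedged usage sketch of the round-trip the tests above assert: tokenize,
# map tokens to ids, and map back. Needs network access for the slow
# "xlnet-base-cased" checkpoint, so it is illustrative rather than a test.
from transformers import XLNetTokenizer as _XLNetTokenizer
_tok = _XLNetTokenizer.from_pretrained('xlnet-base-cased' )
_tokens = _tok.tokenize('This is a test' )
assert _tok.convert_ids_to_tokens(_tok.convert_tokens_to_ids(_tokens ) ) == _tokens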
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ernie'''] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 0 |
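# --- Hedged sketch of what the `_LazyModule` indirection above buys: attribute
# access triggers the real import, so importing the package stays cheap. This is
# a simplified stand-in, not the transformers implementation.
import importlib
import types
class _TinyLazyModule(types.ModuleType ):
    def __init__(self , name , import_structure ):
        super().__init__(name )
        self._import_structure = import_structure
    def __getattr__(self , attr ):
        for module_name, symbols in self._import_structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(module_name ) , attr )
        raise AttributeError(attr )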
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector :
'''simple docstring'''
    def __init__( self : List[str] , components : Collection[float] | None = None ):
        if components is None:
            components = []
        self.__components = list(components )
def __len__( self : Optional[Any] ):
return len(self.__components )
def __str__( self : Dict ):
return "(" + ",".join(map(UpperCAmelCase_ , self.__components ) ) + ")"
def __add__( self : Tuple , UpperCAmelCase_ : Vector ):
SCREAMING_SNAKE_CASE : Optional[Any] = len(self )
if size == len(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = [self.__components[i] + other.component(UpperCAmelCase_ ) for i in range(UpperCAmelCase_ )]
return Vector(UpperCAmelCase_ )
else:
raise Exception("must have the same size" )
def __sub__( self : str , UpperCAmelCase_ : Vector ):
SCREAMING_SNAKE_CASE : str = len(self )
if size == len(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.__components[i] - other.component(UpperCAmelCase_ ) for i in range(UpperCAmelCase_ )]
return Vector(UpperCAmelCase_ )
else: # error case
raise Exception("must have the same size" )
@overload
def __mul__( self : Dict , UpperCAmelCase_ : float ):
...
@overload
def __mul__( self : Tuple , UpperCAmelCase_ : Vector ):
...
def __mul__( self : Optional[int] , UpperCAmelCase_ : float | Vector ):
if isinstance(UpperCAmelCase_ , (float, int) ):
SCREAMING_SNAKE_CASE : Optional[int] = [c * other for c in self.__components]
return Vector(UpperCAmelCase_ )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and len(self ) == len(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : List[str] = len(self )
SCREAMING_SNAKE_CASE : List[str] = [self.__components[i] * other.component(UpperCAmelCase_ ) for i in range(UpperCAmelCase_ )]
return sum(UpperCAmelCase_ )
else: # error case
raise Exception("invalid operand!" )
def _A ( self : List[Any] ):
return Vector(self.__components )
    def component( self : int , i : int ):
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception("index out of range" )
    def change_component( self : int , pos : int , value : float ):
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value
    def euclidean_length( self : Union[str, Any] ):
        if len(self.__components ) == 0:
            raise Exception("Vector is empty" )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )
    def angle( self : Dict , other : Vector , deg : bool = False ):
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
def zero_vector( dimension ):
    """simple docstring"""
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def lowerCamelCase__ ( dimension , pos ):
    """simple docstring"""
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def lowerCamelCase__ ( scalar , x , y ):
    """simple docstring"""
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
random.seed(__lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = [random.randint(__lowercase , __lowercase ) for _ in range(__lowercase )]
return Vector(__lowercase )
class Matrix :
'''simple docstring'''
    def __init__( self : Dict , matrix : list[list[float]] , w : int , h : int ):
        self.__matrix = matrix
        self.__width = w
        self.__height = h
def __str__( self : List[Any] ):
        ans = ""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : int , UpperCAmelCase_ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
SCREAMING_SNAKE_CASE : Optional[int] = []
for i in range(self.__height ):
SCREAMING_SNAKE_CASE : List[Any] = [
self.__matrix[i][j] + other.component(UpperCAmelCase_ , UpperCAmelCase_ )
for j in range(self.__width )
]
matrix.append(UpperCAmelCase_ )
return Matrix(UpperCAmelCase_ , self.__width , self.__height )
else:
raise Exception("matrix must have the same dimension!" )
def __sub__( self : str , UpperCAmelCase_ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
SCREAMING_SNAKE_CASE : str = []
for i in range(self.__height ):
SCREAMING_SNAKE_CASE : Dict = [
self.__matrix[i][j] - other.component(UpperCAmelCase_ , UpperCAmelCase_ )
for j in range(self.__width )
]
matrix.append(UpperCAmelCase_ )
return Matrix(UpperCAmelCase_ , self.__width , self.__height )
else:
raise Exception("matrices must have the same dimension!" )
@overload
def __mul__( self : Any , UpperCAmelCase_ : float ):
...
@overload
def __mul__( self : int , UpperCAmelCase_ : Vector ):
...
def __mul__( self : Tuple , UpperCAmelCase_ : float | Vector ):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): # matrix-vector
if len(UpperCAmelCase_ ) == self.__width:
SCREAMING_SNAKE_CASE : Optional[Any] = zero_vector(self.__height )
for i in range(self.__height ):
SCREAMING_SNAKE_CASE : int = [
self.__matrix[i][j] * other.component(UpperCAmelCase_ )
for j in range(self.__width )
]
ans.change_component(UpperCAmelCase_ , sum(UpperCAmelCase_ ) )
return ans
else:
raise Exception(
"vector must have the same size as the "
"number of columns of the matrix!" )
elif isinstance(UpperCAmelCase_ , (int, float) ): # matrix-scalar
SCREAMING_SNAKE_CASE : str = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(UpperCAmelCase_ , self.__width , self.__height )
return None
    def height( self : Optional[Any] ):
        return self.__height
    def width( self : Any ):
        return self.__width
    def component( self : Optional[Any] , x : int , y : int ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("change_component: indices out of bounds" )
    def change_component( self : Dict , x : int , y : int , value : float ):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
else:
raise Exception("change_component: indices out of bounds" )
    def minor( self : List[Any] , x : int , y : int ):
        if self.__height != self.__width:
            raise Exception("Matrix is not square" )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()
    def cofactor( self : Tuple , x : int , y : int ):
        if self.__height != self.__width:
            raise Exception("Matrix is not square" )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
else:
raise Exception("Indices out of bounds" )
    def determinant( self : Tuple ):
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if self.__height < 1:
raise Exception("Matrix has no element" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_prods )
def lowerCamelCase__ ( n ):
    """simple docstring"""
    ans = [[0] * n for _ in range(n )]
    return Matrix(ans , n , n )
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
random.seed(__lowercase )
SCREAMING_SNAKE_CASE : int = [
[random.randint(__lowercase , __lowercase ) for _ in range(__lowercase )] for _ in range(__lowercase )
]
return Matrix(__lowercase , __lowercase , __lowercase )
| 62 |
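# --- Hedged usage sketch for the linear-algebra module above (the class names
# Vector and Matrix are the ones the module's own helpers reference).
v = Vector([1.0, 2.0, 2.0] )
assert len(v ) == 3 and abs(v.euclidean_length() - 3.0 ) < 1e-9
m = Matrix([[1, 0], [0, 1]] , 2 , 2 )
assert m.determinant() == 1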
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big( n : int , prec : int = 1_000 ) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d usable as a modular exponent
        exp += 1
    # n - 1 = d*(2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i))) | 686 | 0 |
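# --- Hedged sanity check for the Miller-Rabin style test above (probabilistic,
# but with prec=1000 a wrong answer on these small inputs is vanishingly
# unlikely; bin_exp_mod is assumed to be modular exponentiation).
for _value, _expected in [(2, True), (97, True), (91, False), (561, False)]:
    assert is_prime_big(_value ) == _expected, _value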
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def evaluate( item ,main_target ) -> tuple[str, float]:
    # Fitness: number of positions where the candidate matches the target.
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover( parent_a ,parent_b ) -> tuple[str, str]:
    # Swap the tails of two parents at a random cut point.
    random_slice = random.randint(0 ,len(parent_a ) - 1 )
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def mutate( child ,genes ) -> str:
    child_list = list(child )
    if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 ,len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def select( parent_a ,population_score ,genes ,) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_b = population_score[random.randint(0 ,N_SELECTED )][0]
        child_a , child_b = crossover(parent_a[0] ,parent_b )
        # Append new string to the population list.
        pop.append(mutate(child_a ,genes ) )
        pop.append(mutate(child_b ,genes ) )
    return pop
def basic( target ,genes ,debug = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
        msg = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
        raise ValueError(msg )
# Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
        raise ValueError(msg )
# Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append("""""".join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithm is doing.
    generation , total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item ,target ) for item in population]
# Check if there is a matching evolution.
        population_score = sorted(population_score ,key=lambda x : x[1] ,reverse=True )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"""\nGeneration: {generation}"""
f"""\nTotal Population:{total_population}"""
f"""\nBest score: {population_score[0][1]}"""
f"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
# Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
# This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] ,population_score ,genes ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
            if len(population ) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
        '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    generation , population , target = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 587 |
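# --- Hedged driver sketch for the evolutionary loop above, with a tiny target
# so it converges quickly; basic() returns (generation, total_population, best).
_gen , _total , _best = basic('hello' ,list('helo' ) ,debug=False )
assert _best == 'hello'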
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'''\s+''')
def get_hash( example : List[Any] ) -> int:
    return {"hash": hashlib.md5(re.sub(PATTERN , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def line_stats( example : List[Any] ) -> Dict:
    line_lengths = [len(line ) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats( example : Optional[int] ) -> List[str]:
    alpha_frac = np.mean([c.isalnum() for c in example['content']] )
    return {"alpha_frac": alpha_frac}
def check_uniques( example : List[Any] , uniques : Optional[Any] ) -> Optional[int]:
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def is_autogenerated( example : Union[str, Any] , scan_width : int=5 ) -> Optional[Any]:
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def is_config_or_test( example : List[Any] , scan_width : int=5 , coeff : Tuple=0.0_5 ) -> Union[str, Any]:
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n' )
    threshold = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords( example : Union[str, Any] ) -> Any:
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments( example : Tuple , minimum : Any=4 ) -> List[str]:
    lines = example['content'].splitlines()
    counter = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio( example : Dict ) -> Dict:
    input_ids = tokenizer(example['content'] , truncation=False )['input_ids']  # tokenize the full file
    ratio = len(example['content'] ) / len(input_ids )
return {"ratio": ratio}
def preprocess( example : Optional[Any] ) -> Any:
    results = {}
results.update(get_hash(__lowercase ) )
results.update(line_stats(__lowercase ) )
results.update(alpha_stats(__lowercase ) )
results.update(char_token_ratio(__lowercase ) )
results.update(is_autogenerated(__lowercase ) )
results.update(is_config_or_test(__lowercase ) )
results.update(has_no_keywords(__lowercase ) )
results.update(has_few_assignments(__lowercase ) )
return results
def filter( example : Optional[int] , uniques : str , args : List[Any] ) -> int:
    if not check_uniques(example , uniques ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file( file_path : Dict ) -> Dict:
    with open(file_path , 'rb' ) as f_in:
        with gzip.open(str(file_path ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter , duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
    print(F'Size of deduplicate dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F'file-{file_number+1:012}.json')
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}') | 686 | 0 |
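# --- Stand-alone hedged sketch of the exact-dedup idea used above: keep the
# first occurrence of each content hash. Independent of the datasets library.
import hashlib as _hashlib
def _dedup_exact( texts ):
    seen , kept = set() , []
    for text in texts:
        digest = _hashlib.md5(text.encode('utf-8' ) ).hexdigest()
        if digest not in seen:
            seen.add(digest )
            kept.append(text )
    return kept
assert _dedup_exact(['a', 'b', 'a'] ) == ['a', 'b']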
def solution( n : int = 1000 ) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 454 |
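# --- Hedged cross-check: the closed-form generator in solution() matches an
# explicit accumulation loop over the same range.
def _solution_loop( n = 1000 ):
    total = 0
    for a in range(3 , n + 1 ):
        total += 2 * a * ((a - 1) // 2)
    return total
assert _solution_loop(100 ) == solution(100 )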
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : int = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "yolos"
    def __init__( self : int , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11" )
@property
    def inputs( self : str ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation( self : Any ):
'''simple docstring'''
return 1E-4
@property
    def default_onnx_opset( self : List[Any] ):
'''simple docstring'''
return 12 | 686 | 0 |
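# --- Hedged usage sketch (class name as restored above): the YOLOS config
# follows the usual PretrainedConfig pattern, so keyword overrides land as
# attributes.
_cfg = YolosConfig(num_detection_tokens=50 )
assert _cfg.num_detection_tokens == 50 and _cfg.hidden_size == 768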
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase =logging.get_logger(__name__)
lowerCamelCase ={
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class _lowerCamelCase ( PretrainedConfig ):
    """simple docstring"""
    model_type = "lilt"
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_ad_position_embeddings=1_0_2_4 , **kwargs , ) -> Any:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_ad_position_embeddings = max_ad_position_embeddings
| 285 |
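# --- Hedged usage sketch for the LiLT config above: defaults mirror the
# signature and overrides land as attributes (class name kept as in the dump).
_lilt_cfg = _lowerCamelCase(channel_shrink_ratio=2 )
assert _lilt_cfg.channel_shrink_ratio == 2 and _lilt_cfg.vocab_size == 3_0_5_2_2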
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_lowerCamelCase : Tuple = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = '''ResNetConfig'''
# Base docstring
_CHECKPOINT_FOR_DOC = '''microsoft/resnet-50'''
_EXPECTED_OUTPUT_SHAPE = [1, 2_048, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''microsoft/resnet-50'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tiger cat'''
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer( nn.Module ):
'''simple docstring'''
    def __init__( self : Optional[int] , in_channels : int , out_channels : int , kernel_size : int = 3 , stride : int = 1 , activation : str = "relu" ):
        '''simple docstring'''
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , bias=False )  # no bias: a batch norm follows
        self.normalization = nn.BatchNorm2d(out_channels )
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
    def A ( self : Union[str, Any] , hidden_state : Tensor ):
        '''simple docstring'''
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class ResNetEmbeddings( nn.Module ):
'''simple docstring'''
    def __init__( self : Optional[Any] , config : ResNetConfig ):
        '''simple docstring'''
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
        self.pooler = nn.MaxPool2d(kernel_size=3 , stride=2 , padding=1 )
        self.num_channels = config.num_channels
    def A ( self : Tuple , pixel_values : Tensor ):
        '''simple docstring'''
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        embedding = self.embedder(pixel_values )
        embedding = self.pooler(embedding )
        return embedding
class ResNetShortCut( nn.Module ):
'''simple docstring'''
    def __init__( self : Union[str, Any] , in_channels : int , out_channels : int , stride : int = 2 ):
        '''simple docstring'''
        super().__init__()
        self.convolution = nn.Conv2d(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )
    def A ( self : List[str] , hidden_state : Tensor ):
        '''simple docstring'''
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        return hidden_state
class ResNetBasicLayer( nn.Module ):
'''simple docstring'''
    def __init__( self : Optional[Any] , in_channels : int , out_channels : int , stride : int = 1 , activation : str = "relu" ):
        '''simple docstring'''
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , out_channels , stride=stride ) , ResNetConvLayer(out_channels , out_channels , activation=None ) , )
        self.activation = ACT2FN[activation]
    def A ( self : List[str] , hidden_state : List[str] ):
        '''simple docstring'''
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class ResNetBottleNeckLayer( nn.Module ):
'''simple docstring'''
    def __init__( self : Optional[int] , in_channels : int , out_channels : int , stride : int = 1 , activation : str = "relu" , reduction : int = 4 ):
        '''simple docstring'''
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , reduces_channels , kernel_size=1 ) , ResNetConvLayer(reduces_channels , reduces_channels , stride=stride ) , ResNetConvLayer(reduces_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACT2FN[activation]
    def A ( self : Dict , hidden_state : Union[str, Any] ):
        '''simple docstring'''
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class ResNetStage( nn.Module ):
'''simple docstring'''
    def __init__( self : Dict , config : ResNetConfig , in_channels : int , out_channels : int , stride : int = 2 , depth : int = 2 , ):
        '''simple docstring'''
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels , out_channels , stride=stride , activation=config.hidden_act ) , *[layer(out_channels , out_channels , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
    def A ( self : List[str] , input : Tensor ):
        '''simple docstring'''
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class ResNetEncoder( nn.Module ):
'''simple docstring'''
    def __init__( self : Optional[Any] , config : ResNetConfig ):
        '''simple docstring'''
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(ResNetStage(config , in_channels , out_channels , depth=depth ) )
    def A ( self : str , hidden_state : Tensor , output_hidden_states : bool = False , return_dict : bool = True ):
        '''simple docstring'''
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state , hidden_states=hidden_states , )
class ResNetPreTrainedModel( PreTrainedModel ):
    '''simple docstring'''
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def A ( self : List[str] , module : Dict ):
        '''simple docstring'''
        if isinstance(module , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
        elif isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )
    def A ( self : Tuple , module : List[Any] , value : Optional[Any]=False ):
        '''simple docstring'''
        if isinstance(module , ResNetEncoder ):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
RESNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top." ,RESNET_START_DOCSTRING ,)
class ResNetModel( ResNetPreTrainedModel ):
'''simple docstring'''
    def __init__( self : Optional[Any] , config : Any ):
        '''simple docstring'''
        super().__init__(config )
        self.config = config
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def A ( self : Union[str, Any] , pixel_values : Tensor , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None ):
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    "\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,RESNET_START_DOCSTRING ,)
class ResNetForImageClassification( ResNetPreTrainedModel ):
'''simple docstring'''
    def __init__( self : List[Any] , config : int ):
        '''simple docstring'''
        super().__init__(config )
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def A ( self : Union[str, Any] , pixel_values : Optional[torch.FloatTensor] = None , labels : Optional[torch.LongTensor] = None , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None , ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
@add_start_docstrings(
    "\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " ,RESNET_START_DOCSTRING ,)
class ResNetBackbone( ResNetPreTrainedModel ,BackboneMixin ):
'''simple docstring'''
    def __init__( self : Tuple , config : Union[str, Any] ):
        '''simple docstring'''
        super().__init__(config )
        super()._init_backbone(config )
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @replace_return_docstrings(output_type=BackboneOutput , config_class=_CONFIG_FOR_DOC )
    def A ( self : Dict , pixel_values : Tensor , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values )
        outputs = self.encoder(embedding_output , output_hidden_states=True , return_dict=True )
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names ):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=None , ) | 686 | 0 |
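# --- Hedged smoke test: uses the released transformers implementation (the
# vendored copy above keeps relative imports, so it cannot run stand-alone).
# Runs a randomly initialised ResNet on a dummy batch; no weights download.
import torch as _torch
from transformers import ResNetConfig as _ResNetConfig, ResNetModel as _ResNetModel
_rcfg = _ResNetConfig()
_rmodel = _ResNetModel(_rcfg )
with _torch.no_grad():
    _rout = _rmodel(_torch.randn(1 , _rcfg.num_channels , 64 , 64 ) )
assert _rout.last_hidden_state.shape[0] == 1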
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big( n : int , prec : int = 10_00 ) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d usable as a modular exponent
        exp += 1
    # n - 1=d*(2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 190 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 0 |
def alternative_string_arrange( first_str : str , second_str : str ) -> str:
    first_str_length = len(first_str )
    second_str_length = len(second_str )
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
| 221 |
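# --- Tiny doctest-style check for the interleaver above.
assert alternative_string_arrange('AB' , 'XYZ' ) == 'AXBYZ'
assert alternative_string_arrange('' , 'abc' ) == 'abc'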
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def a_ ( model_name : Union[str, Any] ) -> List[Any]:
    backbone_config = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def a_ ( config : int ) -> str:
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''')
            in_proj_bias = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim:, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
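# A minimal sketch of the fused-QKV split performed above, on a dummy tensor
# (illustrative only; `dim` and the shapes are assumptions, not values read
# from a real checkpoint):
#
#   import torch
#   dim = 4
#   qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
#   q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
#   assert q.shape == k.shape == v.shape == (dim, dim)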
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints', filename='adet_swin_ft.pth')
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365', filename='deta_swin_pt_o365.pth')
    else:
        raise ValueError(f'''Model name {model_name} not supported''')
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    # print the original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace('transformer.decoder', 'model.decoder')] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict['model.' + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace('transformer', 'model')] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format='coco_detection')
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device))
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_snake_case = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
_snake_case = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
_snake_case = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
_snake_case = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__lowercase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__lowercase ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'''jozhang97/{model_name}''' )
processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
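    # Example invocation (the script filename and the output path below are
    # assumptions, shown for illustration only):
    #
    #   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
    #       --pytorch_dump_folder_path ./deta-swin-large-hf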
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]Unicode €.[SEP]')
        encoded = tokenizer('e è é ê ë')
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]e è é ê ë[SEP]')
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), '[CLS]e è é ê ë[SEP]')
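    # The expected ids above are consistent with Perceiver's byte-level scheme:
    # each UTF-8 byte maps to (byte value + 6), with the six special tokens
    # occupying the low ids. A quick sketch of this observation:
    #
    #   text = "Unicode €."
    #   ids = [b + 6 for b in text.encode("utf-8")]
    #   # -> [91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52]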
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets['input_ids'].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token')
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f'''<extra_id_{i}>''' for i in range(125)]
                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual(
                    ['an_additional_special_token'],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])),
                )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens)
                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['a_new_additional_special_token'],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), '�')
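    # 178 - 6 = 172 (0xAC) is a lone UTF-8 continuation byte, so decoding it in
    # isolation cannot produce a valid character and yields the replacement
    # character '�'. This is a reading of the test above, not a documented
    # guarantee of the tokenizer.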
    # the following common-mixin tests do not apply to a byte-level tokenizer
    # without a vocabulary file, so they are intentionally skipped here
    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass

    def test_pretrained_model_lists(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'strength': 0.75,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
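    # Note: the same `seed` drives both the latent image and the numpy RNG, so
    # two calls with equal seeds should yield identical dummy inputs. This is an
    # observation about the helper above, not an assertion the tests make:
    #
    #   a, b = self.get_dummy_inputs(seed=3), self.get_dummy_inputs(seed=3)
    #   np.array_equal(a["image"], b["image"])  # -> True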
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
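    # Disabling ONNX Runtime's memory-pattern optimization is a common workaround
    # for issues with dynamic input shapes on the CUDA execution provider; the
    # original file does not state the motivation, so this reading is an assumption.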
    def test_inference_default_pndm(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A fantasy landscape, trending on artstation'
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type='np',
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A fantasy landscape, trending on artstation'
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type='np',
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
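# For reference, the adjacency structure built above maps each node to a list of
# [neighbour, weight] pairs, e.g. adjacency[0] == [[1, 4], [7, 8]] after the loop.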
'''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    """
    Calculate the Hamming distance between two equal-length strings.

    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError('String lengths must match!')
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
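    # An equivalent one-liner, noted for comparison with the loop above:
    #   sum(c1 != c2 for c1, c2 in zip(string1, string2))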
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
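# A minimal sketch of how this dummy behaves (assuming diffusers' DummyObject
# metaclass): any instantiation or classmethod call raises an ImportError that
# names the missing backends, e.g.
#
#   SpectrogramDiffusionPipeline()  # ImportError mentioning transformers/torch/note_seq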