import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS


enable_full_determinism()


class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        # The LDM3D VAE works on six channels: three for RGB and three for depth.
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward with a plain text prompt
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward with precomputed prompt embeddings; both runs must match
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3


@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
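

# --- Added reference snippet (not part of the original tests) ---
# The slice pattern used throughout these tests: on a (1, H, W, 3) array,
# arr[0, -3:, -3:, -1] selects the last channel of the bottom-right 3x3 patch.
def _demo_slice_semantics() -> None:
    arr = np.zeros((1, 64, 64, 3))
    assert arr[0, -3:, -3:, -1].shape == (3, 3)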
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main() -> None:
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
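

# --- Added usage sketch (not part of the original module) ---
# Drives the entry point without a shell by patching sys.argv; the
# `diffusers-cli env` argument vector is an assumption for illustration.
def _demo_cli_env_call() -> None:
    import sys
    from unittest import mock

    with mock.patch.object(sys, "argv", ["diffusers-cli", "env"]):
        main()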
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
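

# --- Added usage sketch (not part of the original module) ---
# Shows how the fields above interact: `num_layers` is derived from the
# switch/ext layer counts, and `attribute_map` aliases `hidden_size` to
# `d_model`. The small sizes are arbitrary illustration values.
def _demo_gptsan_config() -> None:
    config = GPTSanJapaneseConfig(d_model=512, num_switch_layers=2, num_ext_layers=1)
    assert config.num_layers == 3  # num_switch_layers + num_ext_layers
    assert config.hidden_size == 512  # aliased to d_model via attribute_map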
import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk down the attribute path ("a.b.c") to the target module/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model weights into the transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
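

# --- Added usage note (not part of the original script) ---
# Example invocation; the script filename and all paths are placeholders:
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path ./unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --config_path ./config.json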
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """
    Approximate the arc length of `fnc` between `x_start` and `x_end` by
    summing the lengths of `steps` straight chords along the curve.
    """
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
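

# --- Added sanity check (not part of the original module) ---
# The chord-sum approximation is exact for a straight line: the length of
# f(x) = x over [0, 1] is sqrt(2).
assert abs(line_length(lambda x: x, 0, 1, steps=10) - math.sqrt(2)) < 1e-9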
from scipy.stats import spearmanr

import datasets


_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
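

# --- Added reference snippet (not part of the original module) ---
# The metric is a thin wrapper around scipy; calling scipy directly on the
# data from Example 1 in the docstring reproduces the same coefficient.
def _demo_scipy_spearmanr() -> None:
    rho, p_value = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
    assert round(rho, 1) == -0.7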
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
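

# --- Added reference snippet (not part of the original tests) ---
# What flatten_dict does, in miniature: nested keys are joined with ".";
# the "." delimiter is the documented default and an assumption here.
def _demo_flatten_dict() -> None:
    nested = {"a": {"b": 1, "c": {"d": 2}}}
    assert flatten_dict(nested) == {"a.b": 1, "a.c.d": 2}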
from ..utils import DummyObject, requires_backends


class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
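

# --- Added illustration (not part of the original module) ---
# A self-contained sketch of the dummy-object pattern used above: a metaclass
# stands in for a class whose backend is missing and fails loudly on use.
# `_MissingBackend` and `FakeOnnxModel` are hypothetical names.
class _MissingBackend(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the `onnx` backend to be installed.")


class FakeOnnxModel(metaclass=_MissingBackend):
    pass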
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """
    Resolve a force of given magnitude and direction into its (x, y) components.
    `angle` is interpreted in degrees unless `radian_mode` is True.
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """
    Check if a system of forces is in static equilibrium by verifying that
    the net moment about the origin is (approximately) zero.
    """
    # The moment of each force is the 2D cross product of its position vector
    # with the force vector.
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
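

# --- Added example (not part of the original module) ---
# Two equal and opposite forces applied at mirrored points produce no net
# moment about the origin, so the system is in static equilibrium.
def _demo_equilibrium() -> None:
    demo_forces = array([polar_force(1.0, 0), polar_force(1.0, 180)])
    demo_location: NDArray[float64] = array([[1, 0], [-1, 0]])
    assert in_static_equilibrium(demo_forces, demo_location)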
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
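

# --- Added usage sketch (not part of the original module) ---
# Instantiates a small config and inspects the ONNX input spec; the tiny
# hyperparameters are arbitrary illustration values.
def _demo_gptj_onnx_config() -> None:
    config = GPTJConfig(n_layer=2, n_head=4, n_embd=64, vocab_size=128)
    onnx_config = GPTJOnnxConfig(config)
    assert onnx_config.num_layers == 2
    assert list(onnx_config.inputs.keys()) == ["input_ids", "attention_mask"]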
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is a mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
"""
Project Euler Problem 87: https://projecteuler.net/problem=87

Count the numbers below `limit` that are expressible as the sum of a prime
square, a prime cube, and a prime fourth power.
"""


def solution(limit: int = 50000000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd candidates up to sqrt(limit - 24)
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
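

# --- Added note (not part of the original module) ---
# From the Project Euler 87 statement: the smallest number expressible as the
# sum of a prime square, prime cube, and prime fourth power is
# 28 = 2**2 + 2**3 + 2**4.
assert 2**2 + 2**3 + 2**4 == 28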
"""
A collection of simple linear-algebra helpers: a Vector class, a Matrix class,
and constructors for zero, unit-basis, and random instances.
"""
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        """Create a vector from a collection of components (default: empty)."""
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        """Return the number of components (the dimension) of the vector."""
        return len(self.__components)

    def __str__(self) -> str:
        """Return a string representation like "(1,2,3)"."""
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        """Component-wise vector addition; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        """Component-wise vector subtraction; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        """Scalar multiplication, or the dot product when `other` is a Vector."""
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        """Return a copy of this vector."""
        return Vector(self.__components)

    def component(self, i: int) -> float:
        """Return the i-th component of the vector."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        """Set the component at index `pos` to `value`."""
        # precondition
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        """Return the Euclidean (L2) norm of the vector."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        """Return the angle between this vector and `other` (radians by default)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    """Return the zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector with a 1 at index `pos` and 0 elsewhere."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Compute the classic BLAS operation scalar * x + y."""
    # precondition
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a vector of size n with random integer components in [a, b]."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        """Create a matrix from a list of rows plus an explicit width and height."""
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        """Return a row-per-line string representation of the matrix."""
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        """Element-wise matrix addition; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        """Element-wise matrix subtraction; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        """Matrix-vector product, or scalar multiplication of all elements."""
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        """Return the number of rows."""
        return self.__height

    def width(self) -> int:
        """Return the number of columns."""
        return self.__width

    def component(self, x: int, y: int) -> float:
        """Return the element at row x, column y."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        """Set the element at row x, column y."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        """Return the determinant of the submatrix with row x and column y removed."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """Return the cofactor (signed minor) at row x, column y."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """Return the determinant via Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    """Return the n x n zero matrix."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a width x height matrix with random integer entries in [a, b]."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
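

# --- Added usage sketch (not part of the original module) ---
# Exercises the helpers above: vector arithmetic, the axpy operation, and a
# matrix-vector product plus a determinant.
def _demo_lib() -> None:
    v = Vector([1.0, 2.0, 2.0])
    assert v.euclidean_length() == 3.0

    w = axpy(2.0, Vector([1, 0, 0]), Vector([0, 1, 0]))  # 2*x + y
    assert str(w) == "(2.0,1.0,0.0)"

    m = Matrix([[1, 0], [0, 2]], 2, 2)
    assert m.determinant() == 2
    assert str(m * Vector([3, 4])) == "(3,8)"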
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : Optional[int] =TextToVideoSDPipeline
a : Optional[int] =TEXT_TO_IMAGE_PARAMS
a : Any =TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
a : Union[str, Any] =frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""),up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""),cross_attention_dim=32,attention_head_dim=4,)
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,)
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,sample_size=1_28,)
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,hidden_act="""gelu""",projection_dim=5_12,)
__lowerCAmelCase = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = TextToVideoSDPipeline(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """np"""
__lowerCAmelCase = sd_pipe(**__SCREAMING_SNAKE_CASE ).frames
__lowerCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
__lowerCAmelCase = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",)
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
__lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowerCAmelCase = pipe.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=25,output_type="""pt""" ).frames
__lowerCAmelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
__lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__lowerCAmelCase = pipe.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=2,output_type="""pt""" ).frames
__lowerCAmelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
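# Minimal sketch (not part of the test suite) of the tolerance check used above:
# generated frames are compared to a reference clip by mean absolute difference.
def _frames_close(video , expected , tol=5e-2 ):
    return float(np.abs(expected - video ).mean() ) < tol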
| 689 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowerCamelCase( __snake_case ):
'''simple docstring'''
__magic_name__ = 'char'
__magic_name__ = 'bpe'
__magic_name__ = 'wp'
__A : int = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowerCamelCase( __snake_case ):
'''simple docstring'''
__magic_name__ = ['image_processor', 'char_tokenizer']
__magic_name__ = 'ViTImageProcessor'
__magic_name__ = 'MgpstrTokenizer'
def __init__( self , snake_case_=None , snake_case_=None , **snake_case_ ):
_A = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , snake_case_ , )
_A = kwargs.pop('feature_extractor' )
_A = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
_A = tokenizer
_A = AutoTokenizer.from_pretrained('gpt2' )
_A = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(snake_case_ , snake_case_ )
def __call__( self , snake_case_=None , snake_case_=None , snake_case_=None , **snake_case_ ):
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
_A = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if text is not None:
_A = self.char_tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
_A = encodings['input_ids']
return inputs
def lowerCAmelCase__ ( self , snake_case_ ):
_A, _A, _A = sequences
_A = char_preds.size(0 )
_A, _A = self._decode_helper(snake_case_ , 'char' )
_A, _A = self._decode_helper(snake_case_ , 'bpe' )
_A, _A = self._decode_helper(snake_case_ , 'wp' )
_A = []
_A = []
for i in range(snake_case_ ):
_A = [char_scores[i], bpe_scores[i], wp_scores[i]]
_A = [char_strs[i], bpe_strs[i], wp_strs[i]]
_A = scores.index(max(snake_case_ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
_A = {}
_A = final_strs
_A = final_scores
_A = char_strs
_A = bpe_strs
_A = wp_strs
return out
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ):
if format == DecodeType.CHARACTER:
_A = self.char_decode
_A = 1
_A = '[s]'
elif format == DecodeType.BPE:
_A = self.bpe_decode
_A = 2
_A = '#'
elif format == DecodeType.WORDPIECE:
_A = self.wp_decode
_A = 102
_A = '[SEP]'
else:
raise ValueError(F"Format {format} is not supported." )
_A, _A = [], []
_A = pred_logits.size(0 )
_A = pred_logits.size(1 )
_A, _A = pred_logits.topk(1 , dim=-1 , largest=snake_case_ , sorted=snake_case_ )
_A = preds_index.view(-1 , snake_case_ )[:, 1:]
_A = decoder(snake_case_ )
_A, _A = torch.nn.functional.softmax(snake_case_ , dim=2 ).max(dim=2 )
_A = preds_max_prob[:, 1:]
for index in range(snake_case_ ):
_A = preds_str[index].find(snake_case_ )
_A = preds_str[index][:pred_eos]
_A = preds_index[index].cpu().tolist()
_A = pred_index.index(snake_case_ ) if eos_token in pred_index else -1
_A = preds_max_prob[index][: pred_eos_index + 1]
_A = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(snake_case_ )
conf_scores.append(snake_case_ )
return dec_strs, conf_scores
def lowerCAmelCase__ ( self , snake_case_ ):
_A = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(snake_case_ )]
return decode_strs
def lowerCAmelCase__ ( self , snake_case_ ):
return self.bpe_tokenizer.batch_decode(snake_case_ )
def lowerCAmelCase__ ( self , snake_case_ ):
_A = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(snake_case_ )]
return decode_strs
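# Standalone sketch of the best-of-three selection in batch_decode above: for each
# sample, the char/BPE/wordpiece candidate with the highest confidence score wins.
def _pick_best(strs , scores ):
    best = scores.index(max(scores ) )
    return strs[best], scores[best]
# _pick_best(["hello", "he1lo"], [0.91, 0.40]) -> ("hello", 0.91)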
| 27 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook( method ):
    """Decorator firing an accelerate `_hf_hook.pre_forward` before the wrapped method."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse("""0.17.0""" ):
        return method

    def wrapper(self , *args , **kwargs ):
        if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )

    return wrapper
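# Minimal usage sketch: decorate a method of a module that may carry an
# accelerate `_hf_hook`; the hook's pre_forward then fires before each call.
#
#   class Encoder:
#       @apply_forward_hook
#       def encode(self, x):
#           return x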
| 689 | 0 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main( ):
    """Close or comment on stale GitHub issues, skipping exempted labels."""
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/transformers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda comment : comment.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
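# Sketch of the inactivity predicate above, factored out for clarity:
#
#   def _needs_nudge(updated_days_ago, created_days_ago):
#       return updated_days_ago > 23 and created_days_ago >= 30
#
#   _needs_nudge(30, 60) -> True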
| 28 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path , checkpoint_path , lora_prefix_unet , lora_prefix_text_encoder , alpha ):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split(""".""" )[0].split(lora_prefix_text_encoder + """_""" )[-1].split("""_""" )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(""".""" )[0].split(lora_prefix_unet + """_""" )[-1].split("""_""" )
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
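    # Toy demonstration of the merge rule implemented in convert() above,
    # W = W0 + alpha * (up @ down), on random matrices (shapes illustrative only):
    #
    #   import numpy as np
    #   rng = np.random.default_rng(0)
    #   W0 = rng.normal(size=(8, 8))
    #   up, down = rng.normal(size=(8, 4)), rng.normal(size=(4, 8))
    #   W = W0 + 0.75 * (up @ down)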
| 689 | 0 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
A_ = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
A_ = typing.Union[np.floataa, int, float] # noqa: UP007
def euclidean_distance(point_a ,point_b ):
    return np.sqrt(np.sum((np.asarray(point_a ) - np.asarray(point_b )) ** 2 ) )
def euclidean_distance_no_np(point_a ,point_b ):
    return sum((va - vb) ** 2 for va, vb in zip(point_a ,point_b ) ) ** (1 / 2)
if __name__ == "__main__":
    def benchmark( ):
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' ,number=10_000 ,globals=globals() ,) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' ,number=10_000 ,globals=globals() ,) )
benchmark()
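    # Quick cross-check: the numpy and pure-Python implementations above must agree
    # up to floating-point error on the same input (both give sqrt(27) here).
    assert abs(euclidean_distance([1, 2, 3] , [4, 5, 6] ) - euclidean_distance_no_np([1, 2, 3] , [4, 5, 6] )) < 1e-12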
| 29 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter( input_str = "" , ) -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def can_string_be_rearranged_as_palindrome( input_str = "" ) -> bool:
    if len(input_str ) == 0:
        return True
    lower_case_input_str = input_str.replace(""" """ , """""" ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
        if odd_char > 1:
            return False
    return True
def benchmark( input_str = "" ) -> None:
    print("""\nFor string = """ , input_str , """:""" )
    print(
        """> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(input_str ) , """\ttime =""" , timeit(
        """z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
    print(
        """> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(input_str ) , """\ttime =""" , timeit(
        """z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
    check_str = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
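    # Worked example of the odd-count rule: at most one character may occur an
    # odd number of times for a palindrome rearrangement to exist.
    for demo in ("racecar" , "aabb" , "abc"):
        odd = sum(c % 2 for c in Counter(demo ).values() )
        print(demo , "->" , odd , "odd-count characters ->" , odd < 2 )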
| 689 | 0 |
def miller_rabin( n , allow_probable = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
'''Warning: upper bound of deterministic test is exceeded. '''
'''Pass allow_probable=True to allow probabilistic test. '''
'''A return value of True indicates a probable prime.''' )
# array bounds provided by analysis
    bounds = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
    for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
        # if pr is False, then the above loop never evaluated to true,
        # and n MUST be composite
return False
return True
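# Standalone illustration of the decomposition computed inside miller_rabin:
# write n - 1 = d * 2**s with d odd.
def _decompose(n ):
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    return d, s
# _decompose(221) -> (55, 2), since 220 == 55 * 2**2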
def test_miller_rabin( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin() | 30 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _lowerCAmelCase ( lowercase ) -> List[Any]:
__lowerCAmelCase = VideoMAEConfig()
set_architecture_configs(lowercase , lowercase )
if "finetuned" not in model_name:
__lowerCAmelCase = False
if "finetuned" in model_name:
__lowerCAmelCase = """huggingface/label-files"""
if "kinetics" in model_name:
__lowerCAmelCase = 400
__lowerCAmelCase = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
__lowerCAmelCase = 174
__lowerCAmelCase = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
__lowerCAmelCase = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) )
__lowerCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
def _lowerCAmelCase ( lowercase , lowercase ) -> Any:
if "small" in model_name:
__lowerCAmelCase = 384
__lowerCAmelCase = 1536
__lowerCAmelCase = 12
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 3
__lowerCAmelCase = 192
__lowerCAmelCase = 768
elif "large" in model_name:
__lowerCAmelCase = 1024
__lowerCAmelCase = 4096
__lowerCAmelCase = 24
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 8
__lowerCAmelCase = 512
__lowerCAmelCase = 2048
elif "huge" in model_name:
__lowerCAmelCase = 1280
__lowerCAmelCase = 5120
__lowerCAmelCase = 32
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 8
__lowerCAmelCase = 640
__lowerCAmelCase = 2560
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def _lowerCAmelCase ( lowercase ) -> List[str]:
if "encoder." in name:
__lowerCAmelCase = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
__lowerCAmelCase = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
__lowerCAmelCase = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
__lowerCAmelCase = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowerCAmelCase = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__lowerCAmelCase = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
__lowerCAmelCase = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
__lowerCAmelCase = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
__lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
__lowerCAmelCase = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
__lowerCAmelCase = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
__lowerCAmelCase = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
__lowerCAmelCase = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
__lowerCAmelCase = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__lowerCAmelCase = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__lowerCAmelCase = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
__lowerCAmelCase = name.replace("""head""" , """classifier""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]:
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(lowercase )
if key.startswith("""encoder.""" ):
__lowerCAmelCase = key.replace("""encoder.""" , """""" )
if "qkv" in key:
__lowerCAmelCase = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
__lowerCAmelCase = config.decoder_hidden_size
__lowerCAmelCase = int(key_split[2] )
__lowerCAmelCase = """decoder.decoder_layers."""
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = config.hidden_size
__lowerCAmelCase = int(key_split[1] )
__lowerCAmelCase = """videomae.encoder.layer."""
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val
return orig_state_dict
def _lowerCAmelCase ( ) -> str:
__lowerCAmelCase = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__lowerCAmelCase = np.load(lowercase )
return list(lowercase )
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = get_videomae_config(lowercase )
if "finetuned" in model_name:
__lowerCAmelCase = VideoMAEForVideoClassification(lowercase )
else:
__lowerCAmelCase = VideoMAEForPreTraining(lowercase )
# download original checkpoint, hosted on Google Drive
__lowerCAmelCase = """pytorch_model.bin"""
gdown.cached_download(lowercase , lowercase , quiet=lowercase )
__lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )
if "model" in files:
__lowerCAmelCase = files["""model"""]
else:
__lowerCAmelCase = files["""module"""]
__lowerCAmelCase = convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
# verify model on basic input
__lowerCAmelCase = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__lowerCAmelCase = prepare_video()
__lowerCAmelCase = image_processor(lowercase , return_tensors="""pt""" )
if "finetuned" not in model_name:
__lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
__lowerCAmelCase = torch.load(lowercase )
__lowerCAmelCase = model(**lowercase )
__lowerCAmelCase = outputs.logits
__lowerCAmelCase = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
__lowerCAmelCase = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(f'Model name not supported. Should be one of {model_names}' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , lowercase , atol=1e-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__lowerCAmelCase = outputs.loss
assert torch.allclose(lowercase , lowercase , atol=1e-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase )
model.save_pretrained(lowercase )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase , organization="""nielsr""" )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_a : int = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
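    # Standalone sketch of the fused-qkv split performed in convert_state_dict above:
    # a (3*dim, dim) projection is cut into equal query/key/value blocks.
    #
    #   qkv = torch.zeros(3 * dim, dim)
    #   q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]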
| 689 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class FunnelConfig(PretrainedConfig ):
    """Configuration class to store the configuration of a Funnel Transformer model."""

    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__( self , vocab_size=30_522 , block_sizes=[4, 4, 4] , block_repeats=None , num_decoder_layers=2 , d_model=768 , n_head=12 , d_head=64 , d_inner=3_072 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , initializer_range=0.1 , initializer_std=None , layer_norm_eps=1E-9 , pooling_type="mean" , attention_type="relative_shift" , separate_cls=True , truncate_seq=True , pool_q_only=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes ) if block_repeats is None else block_repeats
        assert len(block_sizes ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs )

    @property
    def num_hidden_layers( self ):
        return sum(self.block_sizes )

    @num_hidden_layers.setter
    def num_hidden_layers( self , value ):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )

    @property
    def num_blocks( self ):
        return len(self.block_sizes )

    @num_blocks.setter
    def num_blocks( self , value ):
        raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
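# Quick usage sketch of the derived properties above: with the default
# block_sizes=[4, 4, 4], num_hidden_layers is their sum and num_blocks their count.
#
#   cfg = FunnelConfig()
#   cfg.num_hidden_layers  # 12
#   cfg.num_blocks         # 3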
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_a : Tuple = """\
"""
_a : Tuple = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_a : Optional[Any] = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ),reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 16,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__lowerCAmelCase = """cuda"""
else:
__lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__SCREAMING_SNAKE_CASE ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__lowerCAmelCase = model.config.max_length - 1
else:
__lowerCAmelCase = model.config.max_length
__lowerCAmelCase = tokenizer(
__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,max_length=__SCREAMING_SNAKE_CASE,return_tensors="""pt""",return_attention_mask=__SCREAMING_SNAKE_CASE,).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = encodings["""input_ids"""]
__lowerCAmelCase = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ),1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ),2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__lowerCAmelCase = []
__lowerCAmelCase = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0,len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase = min(start_index + batch_size,len(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = encoded_texts[start_index:end_index]
__lowerCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
__lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch],dim=1 )
__lowerCAmelCase = torch.cat(
[torch.ones(bos_tokens_tensor.size(),dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask],dim=1 )
__lowerCAmelCase = encoded_batch
with torch.no_grad():
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE ).logits
__lowerCAmelCase = out_logits[..., :-1, :].contiguous()
__lowerCAmelCase = labels[..., 1:].contiguous()
__lowerCAmelCase = attn_mask[..., 1:].contiguous()
__lowerCAmelCase = torch.expa(
(loss_fct(shift_logits.transpose(1,2 ),__SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
| 689 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["YolosFeatureExtractor"]
UpperCAmelCase_ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
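# With the _LazyModule indirection above, heavy submodules are imported only on
# first attribute access, e.g. `from transformers import YolosConfig` resolves
# configuration_yolos lazily instead of at package import time.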
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Union[str, Any] =["""image_processor"""]
a : Dict ="""SamImageProcessor"""
def __init__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.image_processor
__lowerCAmelCase = -10
__lowerCAmelCase = self.image_processor.size["""longest_edge"""]
def __call__( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = self.image_processor(
__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
        # pop arguments that are not used in the forward pass but are used nevertheless
__lowerCAmelCase = encoding_image_processor["""original_sizes"""]
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks if Torch or TF tensor
__lowerCAmelCase = original_sizes.numpy()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._check_and_preprocess_points(
input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = self._normalize_and_convert(
__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,)
return encoding_image_processor
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="pt",):
'''simple docstring'''
if input_points is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0] ) for point in input_points
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
for point, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__lowerCAmelCase , __lowerCAmelCase = self._pad_points_and_labels(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_labels is not None:
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0],is_bounding_box=__SCREAMING_SNAKE_CASE )
for box in input_boxes
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,is_bounding_box=__SCREAMING_SNAKE_CASE )
for box, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
]
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
__lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = max([point.shape[0] for point in input_points] )
__lowerCAmelCase = []
for i, point in enumerate(__SCREAMING_SNAKE_CASE ):
if point.shape[0] != expected_nb_points:
__lowerCAmelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 )
__lowerCAmelCase = np.append(input_labels[i],[self.point_pad_value] )
processed_input_points.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = processed_input_points
return input_points, input_labels
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = original_size
__lowerCAmelCase , __lowerCAmelCase = self.image_processor._get_preprocess_shape(__SCREAMING_SNAKE_CASE,longest_edge=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = deepcopy(__SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE )
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1,2,2 )
__lowerCAmelCase = coords[..., 0] * (new_w / old_w)
__lowerCAmelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1,4 )
return coords
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,):
'''simple docstring'''
if input_points is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks for TF or Torch tensor
__lowerCAmelCase = input_points.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0],__SCREAMING_SNAKE_CASE ):
raise ValueError("""Input points must be a list of list of floating points.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for input_point in input_points]
else:
__lowerCAmelCase = None
if input_labels is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
__lowerCAmelCase = input_labels.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0],__SCREAMING_SNAKE_CASE ):
raise ValueError("""Input labels must be a list of list integers.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for label in input_labels]
else:
__lowerCAmelCase = None
if input_boxes is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
__lowerCAmelCase = input_boxes.numpy().tolist()
if (
not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0],__SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0][0],__SCREAMING_SNAKE_CASE )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) for box in input_boxes]
else:
__lowerCAmelCase = None
return input_points, input_labels, input_boxes
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(__SCREAMING_SNAKE_CASE ) )
def lowerCamelCase__ ( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.image_processor.post_process_masks(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
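# Worked example of the rescaling in _normalize_coordinates above: a point is
# mapped from the original image size to the resized size along each axis.
#
#   old_h, old_w, new_h, new_w = 480, 640, 768, 1024
#   x, y = 320.0, 240.0
#   x * (new_w / old_w), y * (new_h / old_h)  # (512.0, 384.0)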
| 689 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor ):
    """Deprecated alias of CLIPImageProcessor."""

    def __init__( self , *args , **kwargs ):
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 33 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
_a : int = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
a : Optional[str] =field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
a : Optional[str] =field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
a : int =field(
default=10_24 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
a : bool =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the training data."""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the validation data."""} )
a : Optional[str] =field(default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the test data."""} )
def lowerCamelCase__ ( self ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
__lowerCAmelCase = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__lowerCAmelCase = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class _UpperCAmelCase :
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a : str =field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def _lowerCAmelCase ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(lowercase )
datasets.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")
        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True, )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.")
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index) -> None:
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
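
# --- Illustrative usage (an added sketch, not part of the original script) ---
# The preprocessing above flattens a '#'-delimited TabFact `table_text` into a pandas
# DataFrame before handing it to the TAPEX tokenizer. The checkpoint name and example
# strings below are assumptions for demonstration, not values taken from the script.
#
# import pandas as pd
# from transformers import TapexTokenizer
#
# table_text = "city#population\nparis#2161000\nlondon#8982000"
# rows = [line.split("#") for line in table_text.strip("\n").split("\n")]
# table = pd.DataFrame.from_records(rows[1:], columns=rows[0])  # first row becomes the header
# tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")  # assumed checkpoint
# encoding = tokenizer(table, "london is larger than paris", truncation=True)
# print(encoding.input_ids[:10])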
| 689 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batched, differentiable, standard pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int, int]
    def __post_init__(self) -> None:
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self) -> torch.Tensor:
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self) -> torch.Tensor:
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        # Pixel index i maps to (i % width, i // width), i.e. (column, row).
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ], axis=1, )
        return coords

    @property
    def camera_rays(self) -> torch.Tensor:
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays
    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        # Map pixel coordinates to [-1, 1], then scale by tan(fov / 2) to get view-plane fractions.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        # Each ray is stored as an (origin, direction) pair.
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ], dim=2, )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin, x=self.x, y=self.y, z=self.z, width=width, height=height, x_fov=self.x_fov, y_fov=self.y_fov, shape=self.shape, )
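
# --- Illustrative usage (an added sketch, not part of the original module) ---
# Builds a single axis-aligned camera and inspects the tensor produced by `camera_rays`;
# the 4x4 resolution and orientation vectors are arbitrary demo values.
def _demo_camera_rays() -> None:
    camera = DifferentiableProjectiveCamera(
        origin=torch.tensor([[0.0, 0.0, -4.0]]),
        x=torch.tensor([[1.0, 0.0, 0.0]]),
        y=torch.tensor([[0.0, 1.0, 0.0]]),
        z=torch.tensor([[0.0, 0.0, 1.0]]),
        width=4,
        height=4,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, 1),
    )
    rays = camera.camera_rays
    print(rays.shape)  # torch.Size([1, 16, 2, 3]): 16 pixels, each an (origin, direction) pair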
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    """Creates a batch of 20 cameras panning in a circle around the origin."""
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(), x=torch.from_numpy(np.stack(xs, axis=0)).float(), y=torch.from_numpy(np.stack(ys, axis=0)).float(), z=torch.from_numpy(np.stack(zs, axis=0)).float(), width=size, height=size, x_fov=0.7, y_fov=0.7, shape=(1, len(xs)), ) | 34 |
"""Tests for the dummy-object utilities in check_dummies.py."""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 689 | 0 |
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """
    Takes a list of possible side lengths and determines whether a
    two-dimensional polygon with such side lengths can exist.

    >>> check_polygon([6, 10, 5])
    True
    >>> check_polygon([3, 7, 13, 2])
    False
    >>> check_polygon([])
    Traceback (most recent call last):
        ...
    ValueError: Monogons and Digons are not polygons in the Euclidean space
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    # A polygon exists iff the longest side is shorter than the sum of the others.
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
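    # Added quick check beyond the doctests: a 3-4-5 triangle is a valid polygon.
    print(check_polygon([3, 4, 5]))  # True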
| 35 |
"""Convert a decimal number into its simplest fraction form."""
def decimal_to_fraction(decimal) -> tuple[int, int]:
    """
    Return a decimal number as a (numerator, denominator) pair in lowest terms.

    >>> decimal_to_fraction(0.5)
    (1, 2)
    >>> decimal_to_fraction("6.25")
    (25, 4)
    """
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce the fraction with the Euclidean algorithm: the final divisor is the GCD.
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
| 689 | 0 |
def and_gate(input_1: int, input_2: int) -> int:
    """
    Calculate AND of the input values: the output is 1 only when both inputs are 1.

    >>> and_gate(0, 0)
    0
    >>> and_gate(1, 1)
    1
    """
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Tests the and_gate function."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 36 |
"""Auto-generated protocol buffer code for the SentencePiece model format."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
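
# --- Illustrative usage (an added sketch, not part of the generated code) ---
# `ModelProto`, injected into this module's globals by the protobuf builder above,
# can parse a serialized SentencePiece model; "tokenizer.model" is a placeholder path.
if __name__ == "__main__":
    sp_model = ModelProto()  # noqa: F821  (created above by _builder)
    with open("tokenizer.model", "rb") as f:
        sp_model.ParseFromString(f.read())
    print(sp_model.trainer_spec.model_type, len(sp_model.pieces))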
| 689 | 0 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Finds the Jaccard similarity between two sets: the size of the
    intersection divided by the size of the union.

    >>> jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"})
    0.375
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            # Use |A| + |B| instead of |A union B| in the denominator.
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
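    # Added examples: list/tuple inputs and the alternative |A| + |B| denominator.
    print(jaccard_similarity(["a", "b", "c", "d", "e"], ("c", "d", "e", "f", "h", "i")))  # 0.375
    print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 3 / 11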
| 37 |
"""VAE building blocks: Encoder, Decoder, vector quantizer, and diagonal Gaussian posterior."""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding method, carrying the decoded `sample` tensor."""

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)
        self.mid_block = None
        self.down_blocks = nn.ModuleList([])
        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None)
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None)
        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)
        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class Decoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group"):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        temb_channels = in_channels if norm_type == "spatial" else None
        # mid
        self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels)
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type)
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)
        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class VectorQuantizer(nn.Module):
    """Discretization bottleneck of the VQ-VAE: maps each latent vector to its nearest codebook entry."""

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None
        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices)
        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q
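
# --- Illustrative usage (an added sketch, not part of the original module) ---
# Demonstrates the quantizer's nearest-neighbour lookup and straight-through gradient
# on a random latent; the sizes here are arbitrary demo values.
def _demo_vector_quantizer() -> None:
    quantizer = VectorQuantizer(n_e=16, vq_embed_dim=4, beta=0.25)
    z = torch.randn(2, 4, 8, 8, requires_grad=True)  # (batch, channel, height, width)
    z_q, loss, (_, _, indices) = quantizer(z)
    print(z_q.shape, loss.item(), indices.shape)  # z_q matches z's shape; indices pick codebook rows
    loss.backward()  # gradients reach z via the straight-through estimator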
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype)

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has the same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype)
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar, dim=[1, 2, 3], )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
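
# --- Illustrative usage (an added sketch, not part of the original module) ---
# Splits a parameter tensor into mean/logvar halves, draws a reparameterized sample,
# and evaluates the KL term against a standard normal; shapes are arbitrary demo values.
def _demo_diagonal_gaussian() -> None:
    parameters = torch.randn(2, 8, 4, 4)  # 8 channels -> 4 mean + 4 logvar
    posterior = DiagonalGaussianDistribution(parameters)
    latents = posterior.sample(generator=torch.Generator().manual_seed(0))
    print(latents.shape)         # torch.Size([2, 4, 4, 4])
    print(posterior.kl().shape)  # torch.Size([2]): one KL value per batch element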
| 689 | 0 |
"""Tokenization tests for the CodeGen model."""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])
        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"
        input_ids = tokenizer.encode(text)
        truncation_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncation_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_pretrained_model_lists(self):
        # No max_model_input_sizes for CodeGen, so this common check is skipped.
        pass
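
# --- Illustrative usage (an added sketch, not part of the test suite) ---
# `truncate_before_pattern` (exercised by test_truncation above) cuts decoded text at the
# first regex match; running this requires downloading the checkpoint named below.
#
# from transformers import CodeGenTokenizer
#
# tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
# ids = tok.encode("def add(a, b):\n    return a + b\n\n\n\n# trailing comment")
# print(tok.decode(ids, truncate_before_pattern=["\n\n\n", "^#"]))  # drops the tail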
| 38 |
"""A PyTorch ``Dataset`` for SQuAD that caches tokenized features on disk."""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)} )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    doc_stride: int = field(
        default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."}, )
    max_query_length: int = field(
        default=64, metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        }, )
    max_answer_length: int = field(
        default=30, metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    lang_id: int = field(
        default=0, metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        }, )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(self, args: SquadDataTrainingArguments, tokenizer: PreTrainedTokenizer, limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train, is_language_sensitive: Optional[bool] = False, cache_dir: Optional[str] = None, dataset_format: Optional[str] = "pt", ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}", )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start)
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run")
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.features[i]
__lowerCAmelCase = torch.tensor(feature.input_ids,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.attention_mask,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.token_type_ids,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.cls_index,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.p_mask,dtype=torch.float )
__lowerCAmelCase = torch.tensor(feature.is_impossible,dtype=torch.float )
__lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
__lowerCAmelCase = torch.tensor(feature.start_position,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
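

# A minimal sketch of how this dataset is typically consumed with a DataLoader.
# The class name `SquadDataset`, the tokenizer checkpoint, and the data_dir
# below are illustrative assumptions, not values taken from this module.
def _demo_squad_dataset():
    from torch.utils.data import DataLoader

    from transformers import AutoTokenizer, SquadDataTrainingArguments

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    # data_dir is expected to contain the SQuAD train/dev json files
    args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
    dataset = SquadDataset(args, tokenizer, mode="train")
    loader = DataLoader(dataset, batch_size=8, shuffle=True)
    batch = next(iter(loader))
    print(batch["input_ids"].shape)  # (8, args.max_seq_length)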
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
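

# Quick sanity check of the mapping above; the sample keys are illustrative
# assumptions, not keys read from a real FocalNet checkpoint.
def _demo_rename_key():
    sample_keys = [
        "patch_embed.proj.weight",
        "layers.0.blocks.1.modulation.f.bias",
        "head.weight",
    ]
    for key in sample_keys:
        # e.g. "head.weight" -> "classifier.weight" and
        # "layers.0.blocks.1.modulation.f.bias"
        #   -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.bias"
        print(key, "->", rename_key(key))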
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            # (the target key names below are reconstructed from the rename mapping above)
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
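

# Toy illustration of the fused-qkv split performed above: a (3 * dim, dim)
# projection matrix is cut row-wise into equal query/key/value blocks. The
# size is made up for the example.
def _demo_qkv_split():
    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
    # concatenating the blocks reconstructs the original fused matrix
    assert torch.equal(torch.cat([q, k, v]), fused)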
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
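

# A minimal sketch of how these helpers are exercised outside of unittest:
# prepare the components, run one training step, and step the scheduler.
# Illustrative only, not part of the test suite.
def _demo_training_step():
    accelerator = Accelerator(cpu=True)
    model, optimizer, scheduler, train_dl, valid_dl = create_components()
    model, optimizer, scheduler, train_dl, valid_dl = accelerator.prepare(
        model, optimizer, scheduler, train_dl, valid_dl
    )
    batch = torch.randn(8, 2)  # dummy batch matching the Linear(2, 4) model
    loss = model(batch).sum()
    accelerator.backward(loss)
    optimizer.step()
    scheduler.step()
    optimizer.zero_grad()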
class AcceleratorTester(AccelerateTestCase):
@require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}

            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)

            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)
    def test_is_accelerator_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
@slow
@require_bnb
    def test_accelerator_bnb(self):
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
@slow
@require_bnb
    def test_accelerator_bnb_cpu_error(self):
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)
@require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        model = accelerator.prepare(model)
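

# Sketch of the save/load pre-hook mechanism covered by the tests above, used
# outside of unittest. The hook bodies and the "meta.json" file name are
# illustrative assumptions.
def _demo_state_hooks(checkpoint_dir):
    accelerator = Accelerator(cpu=True)

    def save_meta(models, weights, output_dir):
        with open(os.path.join(output_dir, "meta.json"), "w") as f:
            json.dump({"n_models": len(models)}, f)

    def load_meta(models, input_dir):
        with open(os.path.join(input_dir, "meta.json")) as f:
            print(json.load(f))

    save_hook = accelerator.register_save_state_pre_hook(save_meta)
    load_hook = accelerator.register_load_state_pre_hook(load_meta)
    accelerator.save_state(checkpoint_dir)
    accelerator.load_state(checkpoint_dir)
    # hooks are removable handles, like torch module hooks
    save_hook.remove()
    load_hook.remove()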
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Renames the fairseq MusicGen state dict to HF module names and partitions it into
    the decoder (LM) state dict and the encoder-decoder projection state dict."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
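

# Quick smoke test one could run on the converted model/processor pair; the
# text prompt and generation parameters are illustrative assumptions.
def _demo_generate(model, processor):
    inputs = processor(
        text=["80s pop track with bassy drums and synth"],
        padding=True,
        return_tensors="pt",
    )
    audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3.0, max_new_tokens=256)
    sampling_rate = model.config.audio_encoder.sampling_rate
    return audio_values, sampling_rate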
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
def kth_permutation(k, n):
    """
    Finds the k-th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.

    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]
    >>> kth_permutation(10, 4)
    [1, 3, 0, 2]
    """
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation: peel off one factorial "digit" of k at a time and use
    # it to index into the shrinking pool of remaining elements
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
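

# Worked example: for k=10, n=4 the factorial digits of 10 over bases
# (3!, 2!, 1!) are 1, 2, 0, which selects 1, then 3, then 0 from the shrinking
# pool [0, 1, 2, 3], leaving 2 last: [1, 3, 0, 2].
def _demo_kth_permutation():
    assert kth_permutation(0, 5) == [0, 1, 2, 3, 4]
    assert kth_permutation(10, 4) == [1, 3, 0, 2]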
if __name__ == "__main__":
import doctest
doctest.testmod()
from collections import deque


def tarjan(g):
    """
    Tarjan's algorithm for finding the strongly connected components of a
    directed graph given as an adjacency list. Returns the components in
    reverse topological order.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
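

# Tarjan runs in O(V + E): every vertex is pushed and popped at most once and
# every edge is scanned once. Tiny self-check on a made-up 3-vertex digraph:
def _demo_tarjan():
    # 0 -> 1 -> 0 forms one SCC; 2 is reachable but forms its own SCC
    demo_g = create_graph(3, [(0, 1), (1, 0), (1, 2)])
    assert tarjan(demo_g) == [[2], [1, 0]]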
if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to decode raw audio bytes through ffmpeg into a float32 numpy array.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """
    Helper function to stream audio from the microphone through ffmpeg, yielding raw byte chunks.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to stream microphone audio as numpy chunks with optional striding overlap.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and yields chunks of length `chunk_len`. `stride` adds overlap
    between consecutive chunks; `stream` additionally emits partial results while a full chunk is
    not yet available.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
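

# Illustration of the striding above on a toy byte stream: chunks of length 4
# with a (1, 1) stride overlap their neighbours by one byte on each side.
def _demo_chunking():
    chunks = list(chunk_bytes_iter(iter([b"0123456789"]), chunk_len=4, stride=(1, 1)))
    assert chunks[0]["raw"] == b"0123" and chunks[0]["stride"] == (0, 1)
    assert chunks[1]["raw"] == b"2345" and chunks[1]["stride"] == (1, 1)
    # the trailing remainder is emitted as a final short chunk
    assert chunks[-1]["raw"] == b"89" and chunks[-1]["stride"] == (1, 0)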
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """
    Internal function that creates a generator of raw data read from an ffmpeg subprocess.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
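

# Pattern each command follows so that `main` can dispatch on `args.func`
# (hypothetical example mirroring EnvironmentCommand; not an actual diffusers
# class):
#
#     class MyCommand:
#         @staticmethod
#         def register_subcommand(parser):
#             my_parser = parser.add_parser("my-command")
#             my_parser.set_defaults(func=lambda args: MyCommand())
#
#         def run(self):
#             print("hello from my-command")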
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """DFS that walks unused edges, marking each edge in both directions."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Returns 1 for an Euler cycle, 2 for an Euler path (plus an odd-degree
    start node), and 3 when the graph is not Eulerian."""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
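

# Degree-count intuition behind check_circuit_or_path: an undirected graph has
# an Euler cycle iff every vertex has even degree, and an Euler path iff
# exactly two vertices have odd degree (the walk must start at one of them).
# Tiny self-check on a triangle and a triangle with a pendant edge:
def _demo_euler_checks():
    triangle = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    assert check_circuit_or_path(triangle, 10)[0] == 1  # Euler cycle
    with_tail = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1]}
    assert check_circuit_or_path(with_tail, 10)[0] == 2  # Euler path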
if __name__ == "__main__":
main()
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
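

# Illustration of how the "*" placeholder in MAPPING is filled in during
# conversion: the layer index is recovered from the fairseq key and spliced
# into the HF key. The sample key below is an assumption, not read from a
# real checkpoint.
def _demo_wildcard_mapping():
    name = "encoder.layers.7.self_attn.k_proj.weight"
    key = "self_attn.k_proj"
    mapped_key = "unispeech_sat." + MAPPING[key]
    layer_index = name.split(key)[0].split(".")[-2]  # -> "7"
    assert mapped_key.replace("*", layer_index) == "unispeech_sat.encoder.layers.7.attention.k_proj"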
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def lowerCamelCase_ ( self : Any ):
return 3_2
@property
def lowerCamelCase_ ( self : List[str] ):
return 3_2
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
return self.time_input_dim
@property
def lowerCamelCase_ ( self : Any ):
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self : str ):
return 1_0_0
@property
def lowerCamelCase_ ( self : Optional[Any] ):
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        _lowerCamelCase : Any = UNet2DConditionModel(**__A )
return model
@property
def lowerCamelCase_ ( self : Any ):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self : Optional[Any] ):
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Any = self.dummy_unet
_lowerCamelCase : int = self.dummy_movq
_lowerCamelCase : Optional[int] = {
"num_train_timesteps": 1_0_0_0,
"beta_schedule": "linear",
"beta_start": 0.00085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_lowerCamelCase : Optional[Any] = DDIMScheduler(**__A )
_lowerCamelCase : Dict = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCamelCase_ ( self : Dict,__A : List[Any],__A : int=0 ):
_lowerCamelCase : List[str] = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(seed + 1 ) ).to(
__A )
# create init_image
_lowerCamelCase : Any = floats_tensor((1, 3, 6_4, 6_4),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Optional[Any] = image.cpu().permute(0,2,3,1 )[0]
        _lowerCamelCase : List[Any] = Image.fromarray(np.uint8(__A ) ).convert("RGB" ).resize((256, 256) )
# create hint
_lowerCamelCase : str = floats_tensor((1, 3, 6_4, 6_4),rng=random.Random(__A ) ).to(__A )
if str(__A ).startswith("mps" ):
_lowerCamelCase : int = torch.manual_seed(__A )
else:
_lowerCamelCase : Union[str, Any] = torch.Generator(device=__A ).manual_seed(__A )
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaControlnetImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        prompt = "A robot, 4k photo"
        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    def _info(self):
        """Return the metric metadata (features, description, references)."""
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ),reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""],)
    def _compute(self, predictions, references, return_pvalue=False):
        """Compute the Spearman correlation, optionally with its p-value."""
        results = spearmanr(predictions, references)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
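

# A minimal usage sketch (added for illustration; values taken from the docstring
# example above). It calls scipy's `spearmanr` directly, which is what `_compute` wraps.
if __name__ == "__main__":
    rho, pvalue = spearmanr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
    print({"spearmanr": rho, "spearmanr_pvalue": pvalue})  # spearmanr == -0.7 here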
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
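
# Example invocation (hypothetical paths, shown for illustration only):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin \
#       --base_model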
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
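

# A small illustration (added, not from the source): the `DummyObject` metaclass keeps
# this placeholder importable even when the `onnx` backend is missing, and
# `requires_backends` only raises once the class is actually used. Hypothetical demo:
if __name__ == "__main__":
    try:
        _UpperCAmelCase()  # stands in for the real ONNX-backed class
    except ImportError as err:
        # The error message tells the user which backend to install.
        print(err)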
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
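

# A minimal sketch (added for illustration, not part of the original example) of the
# `Accelerator.gather_for_metrics` alternative mentioned in the comments above: it
# automatically drops the duplicated samples that distributed samplers append to the
# last batch, so the manual `samples_seen` bookkeeping becomes unnecessary. The
# function name and arguments below are assumptions for this demo.
def evaluate_with_gather_for_metrics(accelerator, model, eval_dataloader, metric):
    model.eval()
    for batch in eval_dataloader:
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # `gather_for_metrics` truncates the final, padded batch across processes.
        predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()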
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main() | 46 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
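

# A minimal sketch (added for illustration, not part of the original module) of how
# the two classes above fit together: build a tiny config, wrap it in the ONNX config,
# and generate the dummy inputs used for export. The tokenizer checkpoint and the
# small hyper-parameters are assumptions for this demo.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    config = GPTJConfig(n_layer=2, n_head=4, n_embd=64, rotary_dim=16)
    onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")  # assumed checkpoint
    dummy_inputs = onnx_config.generate_dummy_inputs(
        tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
    )
    print({name: tuple(t.shape) for name, t in dummy_inputs.items() if hasattr(t, "shape")})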
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class _UpperCamelCase(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
'''simple docstring'''
def solution(limit: int = 50000000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(f'{solution() = }')
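
# Added sanity check (illustrative): Project Euler 87 states that below fifty exactly
# four numbers (28, 33, 47 and 49) can be written as a prime square plus a prime cube
# plus a prime fourth power, so `solution(50)` should return 4.
if __name__ == "__main__":
    assert solution(50) == 4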
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
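
# Added demo (illustrative): at this point nothing heavy has been imported; the lazy
# module only records *names*. Printing the structure shows plain strings, and the
# actual torch/speech-dependent imports are deferred until an attribute is accessed.
if __name__ == "__main__":
    print(sorted(_import_structure.keys()))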
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",)
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def test_inference_batch_consistent(self):
        pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def test_inference_batch_single_identical(self):
        pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
"""simple docstring"""
def is_unique(input_str: str) -> bool:
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
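
# Added usage examples (illustrative): the big-int bitmap marks every code point seen,
# so the scan is O(n) with an early exit on the first repeated character.
if __name__ == "__main__":
    assert is_unique("abcde") is True
    assert is_unique("abcda") is False
    assert is_unique("") is True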
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
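

# A minimal usage sketch (added for illustration): the decorator is intended for
# methods of models that may be dispatched with `accelerate` hooks, e.g. a VAE's
# `encode`/`decode`. The toy class below is hypothetical.
class _ToyModel:
    @apply_forward_hook
    def encode(self, x):
        # With accelerate >= 0.17.0 and an attached `_hf_hook`, `pre_forward` moves
        # weights onto the right device before this body runs.
        return x


if __name__ == "__main__":
    print(_ToyModel().encode("ok"))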
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCamelCase__ (datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) ,reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] ,)
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
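
# Added note (illustration, hypothetical shapes): the update applied in `convert` is
# the usual LoRA merge rule, W = W0 + alpha * (up @ down). For a rank-2 adapter on a
# 4x4 weight:
#
#     w0 = torch.zeros(4, 4)
#     up, down = torch.randn(4, 2), torch.randn(2, 4)
#     merged = w0 + 0.75 * torch.mm(up, down)   # same shape as w0
#
# which is exactly what `curr_layer.weight.data += alpha * torch.mm(...)` does in place.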
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
a__ : Union[str, Any] = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
        print(f"Running {' '.join(cmd)}")
return
    subprocess.run(cmd)
print('''Successfully setup pod.''' )
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
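
# Example invocation (hypothetical values, for illustration only), matching the
# parser defined above:
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -r requirements.txt" --command "python train.py" \
#       --install_accelerate --debug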
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
    main()
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith("encoder."):
            key = key.replace("encoder.", "")
        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
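def _demo_qkv_split(dim: int = 4):
    # Illustrative sketch, not part of the original script: a fused qkv weight of
    # shape (3 * dim, dim) is sliced into equal query/key/value blocks along the
    # first axis, mirroring the slicing in convert_state_dict above. dim=4 is an
    # arbitrary toy size.
    qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query, key, value = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert torch.equal(torch.cat([query, key, value], dim=0), qkv)
    return query, key, value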
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.92_91, -0.40_61, -0.93_07])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.26_71, -0.46_89, -0.82_35])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.51_42]) if config.norm_pix_loss else torch.tensor([0.64_69])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.07_71, 0.00_11, -0.36_25])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.24_33, 0.16_32, -0.48_94])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.65_88, 0.09_90, -0.24_93])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.36_69, -0.06_88, -0.24_21])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.05_37, -0.15_39, -0.32_66])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.19_61, -0.83_37, -0.63_89])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
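# Example invocation (illustrative only; the script filename is hypothetical and
# the default --checkpoint_url above already points at the base checkpoint):
#
#   python convert_videomae_to_pytorch.py \
#       --model_name videomae-base \
#       --pytorch_dump_folder_path ./videomae-base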
| 689 | 0 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM'] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin' ) )
    new_state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.' ):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
            continue

        new_state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=new_state_dict )
    model.save_pretrained(pytorch_dump_folder_path )

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
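# Example invocation (illustrative only; the checkpoint repo is the one named in
# the --checkpoint-repo help text below, the output path is hypothetical):
#
#   python convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-converted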
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 53 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ),reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""],)
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True, ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
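def _demo_perplexity_from_nll():
    # Illustrative sketch, not part of the metric: CrossEntropyLoss returns
    # natural-log NLL per token, so perplexity is exp(mean NLL). A uniform
    # prediction over vocab_size tokens therefore has perplexity exactly
    # vocab_size; the sizes below are arbitrary toy values.
    vocab_size = 8
    logits = torch.zeros(1, 4, vocab_size)  # uniform prediction for 4 tokens
    labels = torch.zeros(1, 4, dtype=torch.long)
    nll = CrossEntropyLoss(reduction="none")(logits.transpose(1, 2), labels)
    ppl = torch.exp(nll.mean())
    assert torch.isclose(ppl, torch.tensor(float(vocab_size)))
    return ppl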
| 689 | 0 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Model name or path of model to be trained.'} )
    save_dir: Optional[str] = field(
        default='./', metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
    dataset_name_train: Optional[str] = field(
        default='codeparrot/codeparrot-clean-train', metadata={'help': 'Name or path of training dataset.'} )
    dataset_name_valid: Optional[str] = field(
        default='codeparrot/codeparrot-clean-valid', metadata={'help': 'Name or path of validation dataset.'} )
    train_batch_size: Optional[int] = field(default=2, metadata={'help': 'Batch size for training.'} )
    valid_batch_size: Optional[int] = field(default=2, metadata={'help': 'Batch size for evaluation.'} )
    weight_decay: Optional[float] = field(default=0.1, metadata={'help': 'Value of weight decay.'} )
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    learning_rate: Optional[float] = field(default=2e-4, metadata={'help': 'Learning rate for training.'} )
    lr_scheduler_type: Optional[str] = field(default='cosine', metadata={'help': 'Learning rate scheduler type.'} )
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={'help': 'Number of gradient accumulation steps.'} )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
    max_train_steps: Optional[int] = field(default=50000, metadata={'help': 'Maximum number of training steps.'} )
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
    seq_length: Optional[int] = field(default=1024, metadata={'help': 'Sequence lengths used for training.'} )
    seed: Optional[int] = field(default=1, metadata={'help': 'Training seed.'} )
    save_checkpoint_steps: Optional[int] = field(
        default=1024, metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'}, )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
    tokenized: Optional[bool] = field(default=False, metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Model name or path of model to be evaluated.'} )
    dataset_name: Optional[str] = field(
        default='codeparrot/codeparrot-clean-valid', metadata={'help': 'Name or path of validation dataset.'} )
    batch_size: Optional[int] = field(default=2, metadata={'help': 'Batch size used for evaluation.'} )
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
    seq_length: Optional[int] = field(default=1024, metadata={'help': 'Length of sequences to be evaluated.'} )
    seed: Optional[int] = field(default=1, metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class HumanEvalArguments:
    model_ckpt: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Model name or path of model to be evaluated.'} )
    num_workers: Optional[int] = field(default=None, metadata={'help': 'Number of workers used for code evaluation.'} )
    num_tasks: Optional[int] = field(
        default=None, metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'}, )
    do_sample: Optional[bool] = field(
        default=True, metadata={'help': 'Sample from the language model\'s output distribution.'} )
    temperature: Optional[float] = field(default=0.2, metadata={'help': 'Sampling temperature used for generation.'} )
    max_new_tokens: Optional[int] = field(default=256, metadata={'help': 'Maximum number of newly generated tokens.'} )
    top_k: Optional[int] = field(default=0, metadata={'help': 'Top-k parameter used for generation.'} )
    top_p: Optional[float] = field(default=0.95, metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
    batch_size: Optional[int] = field(default=10, metadata={'help': 'Number of generations to run in parallel.'} )
    n_samples: Optional[int] = field(
        default=200, metadata={'help': 'Number of completions to generate for each sample.'} )
    seed: Optional[int] = field(default=1, metadata={'help': 'Random seed used for evaluation.'} )
    output_file: Optional[str] = field(
        default='eval_results.json', metadata={'help': 'Output file where the evaluation results are saved.'} )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default='0', metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
    device_int: Optional[int] = field(
        default=-1, metadata={
            'help': (
                'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
                ' number corresponds to which GPU device id to run on.'
            )
        }, )
@dataclass
class PreprocessingArguments:
    num_workers: Optional[int] = field(
        default=None, metadata={
            'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
        }, )
    dataset_name: Optional[str] = field(
        default='transformersbook/codeparrot', metadata={'help': 'Folder or name of dataset to process.'} )
    output_dir: Optional[str] = field(
        default='codeparrot-clean', metadata={'help': 'Folder to save processed dataset.'} )
    samples_per_file: Optional[int] = field(
        default=100000, metadata={'help': 'Number of files to save per JSON output file.'} )
    text_column: Optional[str] = field(default='content', metadata={'help': 'Column containing text data to process.'} )
    line_max: Optional[float] = field(
        default=1000, metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
    line_mean: Optional[float] = field(
        default=100, metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
    tokenizer: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Name or path to the tokenizer.'}, )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={'help': 'If True, near-duplicate samples are removed.'} )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class TokenizerTrainingArguments:
    base_tokenizer: Optional[str] = field(
        default='gpt2', metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
    dataset_name: Optional[str] = field(
        default='transformersbook/codeparrot-train', metadata={'help': 'Dataset to train tokenizer on.'} )
    text_column: Optional[str] = field(default='content', metadata={'help': 'Column containing text data to process.'} )
    n_examples: Optional[int] = field(default=200000, metadata={'help': 'Number of examples to train tokenizer on.'} )
    vocab_size: Optional[int] = field(
        default=32768, metadata={'help': 'Size of the vocabulary of the new tokenizer.'} )
    tokenizer_name: Optional[str] = field(default='codeparrot', metadata={'help': 'Name of new tokenizer.'} )
    push_to_hub: Optional[bool] = field(default=True, metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class PretokenizationArguments:
    tokenizer_dir: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Name or path to the tokenizer.'} )
    dataset_name: Optional[str] = field(
        default='codeparrot/codeparrot-clean-train', metadata={'help': 'Name or path to the dataset to pretokenize.'} )
    tokenized_data_repo: Optional[str] = field(
        default='tokenized-codeparrot-train', metadata={'help': 'Repo name of the pretokenized data.'} )
    num_workers: Optional[int] = field(default=None, metadata={'help': 'Number of workers used for pretokenization.'} )
@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(
        default='gpt2-large', metadata={'help': 'Configuration to use for model initialization.'} )
    tokenizer_name: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Tokenizer attached to model.'} )
    model_name: Optional[str] = field(default='codeparrot', metadata={'help': 'Name of the created model.'} )
    push_to_hub: Optional[bool] = field(default=True, metadata={'help': 'Push saved model to the hub.'} )
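# Minimal usage sketch (illustrative, not part of this module): these dataclasses
# are meant to be consumed through transformers' HfArgumentParser in the various
# codeparrot entry-point scripts, along the lines of:
#
#   from transformers import HfArgumentParser
#
#   parser = HfArgumentParser(TrainingArguments)
#   args = parser.parse_args_into_dataclasses()[0]
#   print(args.learning_rate)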
| 54 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(self, images=None, input_points=None, input_labels=None, input_boxes=None, return_tensors=None, **kwargs,):
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, **kwargs,)

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes,)

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor, original_sizes, input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, return_tensors=return_tensors,)

        return encoding_image_processor
    def _normalize_and_convert(self, encoding_image_processor, original_sizes, input_points=None, input_labels=None, input_boxes=None, return_tensors="pt",):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0)
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None,):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
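def _demo_coordinate_rescale():
    # Illustrative sketch, not part of the processor: the rescaling rule applied
    # in _normalize_coordinates, written out for a single (x, y) point. The
    # 1024-pixel longest edge mirrors SAM's usual setting but is an assumption
    # here; the image size and point are arbitrary toy values.
    old_h, old_w = 600, 900
    scale = 1024 / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    x, y = 450.0, 300.0
    return x * (new_w / old_w), y * (new_h / old_h)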
| 689 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs,):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
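# Minimal usage sketch (illustrative, not part of this module): enabling I-BERT's
# integer-only mode through the `quant_mode` flag defined above.
#
#   config = IBertConfig(quant_mode=True)
#   assert config.model_type == "ibert"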
| 55 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    dataset_config_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}, )
    max_seq_length: int = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        }, )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."} )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."} )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."} )

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True, )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = {"Refused": 0, "Entailed": 1}
    id_to_label = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result
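    # For example (illustrative only): a table_text of "col1#col2\na#b\nc#d" is
    # parsed by the helper above into a DataFrame with columns ["col1", "col2"]
    # and rows [["a", "b"], ["c", "d"]] before being handed to the tokenizer.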
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
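# Example invocation (illustrative only; the script name and all paths are
# hypothetical, the flags are fields of the argument dataclasses above):
#
#   python run_tabfact_with_tapex.py \
#       --model_name_or_path microsoft/tapex-base \
#       --do_train --do_eval \
#       --output_dir ./tapex-tabfact-output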
| 689 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4_096,
"google/bigbird-roberta-large": 4_096,
"google/bigbird-base-trivia-itc": 4_096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index: int) -> str:
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
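

# Usage sketch (the SentencePiece file path below is a placeholder, not part of
# this module):
#
#     tokenizer = BigBirdTokenizer("spiece.model")
#     ids = tokenizer("Paris is the capital of France.")["input_ids"]
#     text = tokenizer.decode(ids)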
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
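

# The test case above can be run directly with unittest; the pytest invocation
# below is illustrative (the file path is an assumption, not part of this module):
#
#     python -m pytest -sv tests/others/test_check_dummies.py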
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
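
# For example, hamming(10) yields the first ten 5-smooth numbers:
# [1, 2, 3, 4, 5, 6, 8, 9, 10, 12].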

if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
'''simple docstring'''
def decimal_to_fraction(decimal) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
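
# For example, decimal_to_fraction(0.75) == (3, 4): the while-loop above is
# Euclid's algorithm, reducing 75/100 by their greatest common divisor, 25.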
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
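
# Example invocation (all paths and names below are placeholders):
#
#     python convert_wav2vec2_s3prl_checkpoint.py \
#         --base_model_name facebook/wav2vec2-base \
#         --config_path ./config.json \
#         --checkpoint_path ./s3prl_downstream.ckpt \
#         --model_dump_path ./converted_model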
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
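
# Usage sketch (the file name is a placeholder): the generated ModelProto
# message can parse a serialized SentencePiece model directly.
#
#     proto = ModelProto()
#     with open("spiece.model", "rb") as f:
#         proto.ParseFromString(f.read())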
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
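

# A minimal usage sketch (the 4x4 grid below is illustrative, not part of the
# original module): 0 marks an open cell, 1 marks a wall. solve_maze prints the
# 0/1 path matrix and returns True when a route from (0, 0) to the bottom-right
# corner exists.
#
#     example_maze = [
#         [0, 1, 0, 0],
#         [0, 0, 0, 1],
#         [1, 1, 0, 0],
#         [1, 1, 1, 0],
#     ]
#     solve_maze(example_maze)  # -> True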
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
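

# Usage sketch (shapes are illustrative): `parameters` packs mean and logvar
# along the channel axis, as produced by a VAE encoder's quant_conv.
#
#     parameters = torch.randn(1, 8, 32, 32)  # 4 mean + 4 logvar channels
#     posterior = DiagonalGaussianDistribution(parameters)
#     z = posterior.sample()                   # a (1, 4, 32, 32) latent
#     kl = posterior.kl()                      # KL divergence to N(0, I)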
from __future__ import annotations
from decimal import Decimal
from numpy import array


def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
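

# For example (values chosen so the arithmetic is exact):
#
#     inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]])  # -> [[3.0, -5.0], [-1.0, 2.0]]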
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
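

# Usage sketch (argument values are placeholders):
#
#     from transformers import AutoTokenizer
#
#     data_args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     train_dataset = SquadDataset(data_args, tokenizer=tokenizer, mode=Split.train)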
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = 'https://openaipublic.azureedge.net/jukebox/models/'
MODEL_MAPPING = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}


def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key


def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict


@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 61 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name ):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
    if "img_encoder.layers" in name:
        name = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
    if "blocks" in name and "res" not in name:
        name = name.replace("""blocks""" , """layers""" )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("""attn""" , """self_attn""" )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("""proj""" , """out_proj""" )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layer_norm1""" )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("""norm2""" , """layer_norm2""" )
    if "img_encoder.norm" in name:
        name = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
    if "text_encoder.positional_embedding" in name:
        name = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
    if "ln_1" in name:
        name = name.replace("""ln_1""" , """layer_norm1""" )
    if "ln_2" in name:
        name = name.replace("""ln_2""" , """layer_norm2""" )
    if "c_fc" in name:
        name = name.replace("""c_fc""" , """fc1""" )
    if "c_proj" in name:
        name = name.replace("""c_proj""" , """fc2""" )
    if "text_encoder" in name:
        name = name.replace("""text_encoder""" , """text_model""" )
    if "ln_final" in name:
        name = name.replace("""ln_final""" , """final_layer_norm""" )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
    if "img_projector.linear_out." in name:
        name = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
    if "text_projector.linear_hidden" in name:
        name = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
    if "text_projector.linear_out" in name:
        name = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
    return name
def convert_state_dict(orig_state_dict , config ) -> Dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(""".""" )
            stage_num, layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(""".""" )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:]
        else:
            new_name = rename_key(key )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
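# Illustration of the qkv split above (a minimal sketch with assumed dummy shapes,
# not taken from a real checkpoint): a fused projection of shape (3*dim, dim)
# stacks the query, key and value row-blocks in that order.
#   >>> import torch
#   >>> dim = 4
#   >>> fused = torch.randn(3 * dim, dim)
#   >>> q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
#   >>> q.shape == k.shape == v.shape == torch.Size([dim, dim])
#   True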
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    """Copy/paste/tweak the GroupViT checkpoint weights into the Transformers design."""
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model"""]
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
    image = prepare_img()
    inputs = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=image , padding=True , return_tensors="""pt""" )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.35_23, 6.36_29]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.18_73, 8.62_30]] )
    else:
        raise ValueError(f'Model name {model_name} not supported.' )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1e-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print("""Successfully saved processor and model to""" , pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        processor.push_to_hub(model_name , organization="""nielsr""" )
        model.push_to_hub(model_name , organization="""nielsr""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
        default="""groupvit-gcc-yfcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 689 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/mbart-large-50-one-to-many-mmt""": (
            """https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/mbart-large-50-one-to-many-mmt""": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class MBartaaTokenizer( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file , src_lang=None , tgt_lang=None , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens" , [] )
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang , tgt_lang=tgt_lang , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
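        # e.g. (assumed ids, illustration only): if sp_model.PieceToId(",") == 3 in the
        # spm vocab, its fairseq-aligned id is 3 + self.fairseq_offset == 4, matching
        # the alignment table above.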
@property
def _A ( self : Tuple ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _A ( self : List[Any] ):
return self._src_lang
@src_lang.setter
def _A ( self : Optional[int] , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : Optional[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Tuple ):
SCREAMING_SNAKE_CASE : List[Any] = self.__dict__.copy()
SCREAMING_SNAKE_CASE : Dict = None
return state
def __setstate__( self : List[Any] , UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE : str = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _A ( self : List[str] , UpperCAmelCase_ : str ):
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
def _A ( self : Tuple , UpperCAmelCase_ : str ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.PieceToId(UpperCAmelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _A ( self : int , UpperCAmelCase_ : int ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _A ( self : List[str] , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE : List[Any] = []
SCREAMING_SNAKE_CASE : List[Any] = ""
SCREAMING_SNAKE_CASE : Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase_ ) + token
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Optional[int] = []
else:
current_sub_tokens.append(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = False
out_string += self.sp_model.decode(UpperCAmelCase_ )
return out_string.strip()
def _A ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , "wb" ) as fi:
SCREAMING_SNAKE_CASE : List[str] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
def _A ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = [1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE : Optional[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + ([0] * len(UpperCAmelCase_ )) + suffix_ones
def _A ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _A ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] , UpperCAmelCase_ : Optional[str] , **UpperCAmelCase_ : Any ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
SCREAMING_SNAKE_CASE : List[Any] = src_lang
SCREAMING_SNAKE_CASE : str = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.convert_tokens_to_ids(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = tgt_lang_id
return inputs
def _A ( self : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str = "en_XX" , UpperCAmelCase_ : Optional[List[str]] = None , UpperCAmelCase_ : str = "ro_RO" , **UpperCAmelCase_ : Union[str, Any] , ):
SCREAMING_SNAKE_CASE : Dict = src_lang
SCREAMING_SNAKE_CASE : Dict = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : Optional[Any] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _A ( self : Dict ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _A ( self : Any , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : List[Any] = self.lang_code_to_id[src_lang]
SCREAMING_SNAKE_CASE : Dict = [self.cur_lang_code_id]
SCREAMING_SNAKE_CASE : str = [self.eos_token_id]
def _A ( self : Tuple , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.lang_code_to_id[tgt_lang]
SCREAMING_SNAKE_CASE : List[Any] = [self.cur_lang_code_id]
SCREAMING_SNAKE_CASE : List[Any] = [self.eos_token_id]
| 62 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["""model.decoder.embed_positions.weights"""]
def rename_keys(name ):
    if "emb" in name:
        name = name.replace("""emb""" , """model.decoder.embed_tokens""" )
    if "transformer" in name:
        name = name.replace("""transformer""" , """model.decoder""" )
    if "cross_attention" in name:
        name = name.replace("""cross_attention""" , """encoder_attn""" )
    if "linear1" in name:
        name = name.replace("""linear1""" , """fc1""" )
    if "linear2" in name:
        name = name.replace("""linear2""" , """fc2""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """self_attn_layer_norm""" )
    if "norm_cross" in name:
        name = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """final_layer_norm""" )
    if "out_norm" in name:
        name = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
    if "linears" in name:
        name = name.replace("""linears""" , """lm_heads""" )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
    return name
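# e.g. (illustration, not an exhaustive mapping):
#   rename_keys("transformer.layers.0.linear1.weight")
#   -> "model.decoder.layers.0.fc1.weight"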
def rename_state_dict(state_dict , hidden_size ) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("""in_proj_weight""" , """q_proj.weight""" )] = val[:hidden_size, :]
            state_dict[key.replace("""in_proj_weight""" , """k_proj.weight""" )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("""in_proj_weight""" , """v_proj.weight""" )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("""enc_to_dec_proj.""" ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint ) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = TaEncoderModel.from_pretrained("""t5-base""" )
    audio_encoder = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
    if len(unexpected_keys ) > 0:
        raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("""Incorrect shape for logits""" )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("""t5-base""" )
    feature_extractor = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(f'Pushing model {checkpoint} to {repo_id}' )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 689 | 0 |
from manim import *
class a ( Scene ):
    """Animates a sharded checkpoint being loaded into CPU/GPU memory (accelerate docs)."""
    def construct( self ) -> None:
__UpperCAmelCase : Tuple = Rectangle(height=0.5 , width=0.5 )
__UpperCAmelCase : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__UpperCAmelCase : Any = [mem.copy() for i in range(6 )]
__UpperCAmelCase : str = [mem.copy() for i in range(6 )]
__UpperCAmelCase : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : List[str] = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : Dict = Text("""CPU""" , font_size=24 )
__UpperCAmelCase : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
__UpperCAmelCase : List[str] = [mem.copy() for i in range(4 )]
__UpperCAmelCase : str = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : List[Any] = Text("""GPU""" , font_size=24 )
__UpperCAmelCase : Any = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
gpu.move_to([-1, -1, 0] )
self.add(__lowercase )
__UpperCAmelCase : Dict = [mem.copy() for i in range(6 )]
__UpperCAmelCase : int = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : List[Any] = Text("""Model""" , font_size=24 )
__UpperCAmelCase : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.add(__lowercase )
__UpperCAmelCase : Any = []
for i, rect in enumerate(__lowercase ):
rect.set_stroke(__lowercase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__UpperCAmelCase : List[Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__lowercase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__lowercase , buff=0.0 )
self.add(__lowercase )
cpu_targs.append(__lowercase )
__UpperCAmelCase : Any = [mem.copy() for i in range(6 )]
__UpperCAmelCase : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : List[str] = Text("""Loaded Checkpoint""" , font_size=24 )
__UpperCAmelCase : List[str] = Group(__lowercase , __lowercase ).arrange(__lowercase , aligned_edge=__lowercase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__UpperCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCAmelCase : int = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowercase , __lowercase )
__UpperCAmelCase : str = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__UpperCAmelCase : Optional[Any] = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase ) , Write(__lowercase ) )
self.play(Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Dict = []
for i, rect in enumerate(__lowercase ):
__UpperCAmelCase : List[Any] = fill.copy().set_fill(__lowercase , opacity=0.7 )
target.move_to(__lowercase )
first_animations.append(GrowFromCenter(__lowercase , run_time=1 ) )
__UpperCAmelCase : Any = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
self.play(*__lowercase )
self.play(*__lowercase )
self.wait()
| 63 |
'''simple docstring'''
from collections import deque
def tarjan(g ) -> list:
    """
    Tarjan's algorithm for finding the strongly connected components of a directed graph.
    """
    n_vertices = len(g )
    stack = deque()
    on_stack = [False for _ in range(n_vertices )]
    index_of = [-1 for _ in range(n_vertices )]
    lowlink_of = index_of[:]
    def strong_connect(v , index , components ):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index
    components = []
    for v in range(n_vertices ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
    return components
def create_graph(n_vertices , edges ) -> list:
    g = [[] for _ in range(n_vertices )]
    for u, v in edges:
        g[u].append(v )
    return g
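# Quick illustration (a minimal sketch): in a 3-node cycle, every node can reach
# every other, so all three collapse into a single strongly connected component.
#   >>> tarjan(create_graph(3, [(0, 1), (1, 2), (2, 0)]))
#   [[2, 1, 0]]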
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 689 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)
class PoolFormerFeatureExtractor( PoolFormerImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PoolFormerImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 64 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""" )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
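# Typical invocation (assumed console-script wiring): `diffusers-cli env` dispatches
# to EnvironmentCommand, which prints environment/debug information.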
if __name__ == "__main__":
main()
| 689 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase :
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.0_2 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Union[str, Any] = image_size
UpperCAmelCase__ : Any = num_channels
UpperCAmelCase__ : Optional[int] = num_stages
UpperCAmelCase__ : str = hidden_sizes
UpperCAmelCase__ : List[Any] = depths
UpperCAmelCase__ : str = is_training
UpperCAmelCase__ : Dict = use_labels
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : List[Any] = hidden_act
UpperCAmelCase__ : Optional[Any] = num_labels
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : List[Any] = out_features
UpperCAmelCase__ : Optional[Any] = out_indices
UpperCAmelCase__ : Any = scope
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Tuple = None
if self.use_labels:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] ,self.num_labels )
UpperCAmelCase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : int ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=A ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def __lowercase ( self : str ,A : List[Any] ,A : Union[str, Any] ,A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = ConvNextVaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowercase ( self : Union[str, Any] ,A : Union[str, Any] ,A : Optional[Any] ,A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ConvNextVaForImageClassification(A )
model.to(A )
model.eval()
UpperCAmelCase__ : Optional[int] = model(A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : int ,A : Optional[int] ,A : Optional[int] ,A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase__ : Tuple = model(A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : str = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase__ : str = model(A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = config_and_inputs
UpperCAmelCase__ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = config_and_inputs
UpperCAmelCase__ : Dict = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class __lowercase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
snake_case_ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ConvNextVaModelTester(self )
UpperCAmelCase__ : Any = ConfigTester(self ,config_class=A ,has_text_modality=A ,hidden_size=37 )
def __lowercase ( self : List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : List[str] ):
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def __lowercase ( self : str ):
'''simple docstring'''
pass
def __lowercase ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase__ : int = True
if model_class.__name__ in [
*get_values(A ),
*get_values(A ),
]:
continue
UpperCAmelCase__ : Tuple = model_class(A )
model.to(A )
model.train()
UpperCAmelCase__ : List[Any] = self._prepare_for_class(A ,A ,return_labels=A )
UpperCAmelCase__ : Optional[int] = model(**A ).loss
loss.backward()
def __lowercase ( self : Tuple ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase__ : int = False
UpperCAmelCase__ : List[Any] = True
if (
model_class.__name__
in [*get_values(A ), *get_values(A )]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase__ : Dict = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase__ : Tuple = self._prepare_for_class(A ,A ,return_labels=A )
UpperCAmelCase__ : Optional[Any] = model(**A ).loss
loss.backward()
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(A )
UpperCAmelCase__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Optional[Any] = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,A )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def __lowercase ( self : Any ):
'''simple docstring'''
def check_hidden_states_output(A : Optional[Any] ,A : Union[str, Any] ,A : str ):
UpperCAmelCase__ : List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : int = model(**self._prepare_for_class(A ,A ) )
UpperCAmelCase__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase__ : List[str] = self.model_tester.num_stages
self.assertEqual(len(A ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = True
check_hidden_states_output(A ,A ,A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : Tuple = True
check_hidden_states_output(A ,A ,A )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ConvNextVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def __lowercase ( self : int ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(A )
UpperCAmelCase__ : Any = self.default_image_processor
UpperCAmelCase__ : str = prepare_img()
UpperCAmelCase__ : List[Any] = preprocessor(images=A ,return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**A )
# verify the logits
UpperCAmelCase__ : List[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,A )
UpperCAmelCase__ : Optional[Any] = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A ,atol=1e-4 ) )
| 65 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
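# Note: "*" in a mapped key is a placeholder that is filled in with the encoder
# layer index at conversion time, e.g. "encoder.layers.*.attention.k_proj"
# becomes "encoder.layers.7.attention.k_proj" for layer 7.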
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights(fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
            if not is_used:
                unused_weights.append(name )
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = """"""  # the upstream script overrides dict_path with an empty string
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 689 | 0 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Vertex of a weighted, undirected graph."""
    def __init__( self , id_ ):
        self.id = str(id_ )
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex: distance}
    def __lt__( self , other ):
        return self.key < other.key
    def __repr__( self ):
        return self.id
    def add_neighbor( self , vertex ):
        self.neighbors.append(vertex )
    def add_edge( self , vertex , weight ):
        self.edges[vertex.id] = weight
def connect(graph , a , b , edge ) -> None:
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge )
    graph[b - 1].add_edge(graph[a - 1] , edge )
def prim(graph , root ) -> list:
    """Prim's algorithm with a linear scan for the minimum-key vertex: O(n^2)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def prim_heap(graph , root ) -> Iterator[tuple]:
    """Prim's algorithm using a binary heap for the frontier."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def test_vector() -> None:
    pass
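# Minimal usage sketch (assumed 1-indexed endpoints, as `connect` expects):
#   >>> graph = [Vertex(i) for i in range(3)]
#   >>> connect(graph, 1, 2, 1)
#   >>> connect(graph, 2, 3, 2)
#   >>> connect(graph, 1, 3, 4)
#   >>> prim(graph, graph[0])   # MST edges as 1-indexed (vertex, parent) pairs
#   [(2, 1), (3, 2)]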
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ),reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = spearmanr(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
def solution(length: int = 50 ) -> int:
    # One bucket per coloured-tile length (2, 3 and 4), indexed by tile_length - 2.
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
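def _check_length_five() -> None:
    # Added sanity check (hypothetical helper): a row of length 5 admits 7
    # fillings with length-2 tiles, 3 with length-3 tiles and 2 with length-4
    # tiles, i.e. 12 in total.
    assert solution(5) == 12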
if __name__ == "__main__":
print(F"""{solution() = }""") | 67 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel( metaclass=DummyObject ):
    _backends = ["""onnx"""]
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ["""onnx"""] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["""onnx"""] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["""onnx"""] )
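# Usage note (added): with the DummyObject metaclass, instantiating this
# placeholder or calling its classmethods without `onnx` installed raises an
# ImportError asking the user to install the missing backend.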
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ) -> None:
        self.model_tester = FlaxRobertaModelTester(self )
    @slow
    def test_model_from_pretrained( self ) -> None:
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""roberta-base""" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
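# Usage note (added): these tests are collected by pytest, e.g.
#     pytest -k FlaxRobertaModelTest
# (the exact test-file path depends on the repository layout).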
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig( PretrainedConfig ):
    model_type = """gptj"""
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=5_04_00 , n_positions=20_48 , n_embd=40_96 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_02_56 , eos_token_id=5_02_56 , tie_word_embeddings=False , **kwargs , ):
'''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class GPTJOnnxConfig( OnnxConfigWithPast ):
    def __init__( self , config , task = "default" , patching_specs = None , use_past = False , ):
        '''simple docstring'''
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , """pad_token_id""" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs( self ):
        '''simple docstring'''
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
    def num_layers( self ):
        '''simple docstring'''
        return self._config.n_layer
    @property
    def num_attention_heads( self ):
        '''simple docstring'''
        return self._config.n_head
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        '''simple docstring'''
        common_inputs = super(GPTJOnnxConfig , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
                batch , seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
return ordered_inputs
@property
    def default_onnx_opset( self ):
'''simple docstring'''
return 13
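# Usage sketch (added; values are illustrative, not canonical):
#     config = GPTJConfig(vocab_size=1000, n_positions=128, n_embd=64, n_layer=2, n_head=4)
#     onnx_config = GPTJOnnxConfig(config, task="default")
#     list(onnx_config.inputs)  # -> ["input_ids", "attention_mask"]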
'''simple docstring'''
def solution(length: int = 50 ) -> int:
    # Count fillings of a row with blocks of length >= 3 that are separated by
    # at least one empty square (Project Euler 114 style).
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
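def _check_length_seven() -> None:
    # Added sanity check (hypothetical helper): a row of length seven admits
    # exactly seventeen fillings, matching the problem's worked example.
    assert solution(7) == 17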
if __name__ == "__main__":
print(F'''{solution() = }''')
'''simple docstring'''
def solution(limit: int = 5000_0000 ) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2) )
    primes = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
    # the early-exit breaks below assume the primes are visited in ascending order
    primes = sorted(primes )
    for prime_a in primes:
        square = prime_a * prime_a
        for prime_b in primes:
            cube = prime_b * prime_b * prime_b
            if square + cube >= limit - 16:
                break
            for prime_c in primes:
                tetr = prime_c * prime_c * prime_c * prime_c
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total )
    return len(ret )
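def _check_limit_fifty() -> None:
    # Added sanity check (hypothetical helper): below fifty exactly four
    # numbers are expressible as a prime square plus a prime cube plus a
    # prime fourth power (28, 33, 47 and 49).
    assert solution(50) == 4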
if __name__ == "__main__":
print(f'{solution() = }')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : Dict = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""),up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""),cross_attention_dim=32,attention_head_dim=4,)
        scheduler = DDIMScheduler(
            beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",clip_sample=False,set_alpha_to_one=False,)
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,sample_size=1_28,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,hidden_act="""gelu""",projection_dim=5_12,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
        }
        return components
return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """pt""",
        }
return inputs
    def test_text_to_video_default_case( self ):
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["""output_type"""] = """np"""
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_attention_slicing_forward_pass( self ):
        '''simple docstring'''
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False , expected_max_diff=3e-3 )
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",)
    def test_xformers_attention_forwardGenerator_pass( self ):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False , expected_max_diff=1e-2 )
    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def test_inference_batch_consistent( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
    def test_num_images_per_prompt( self ):
        '''simple docstring'''
        pass
    def test_progress_bar( self ):
        '''simple docstring'''
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests( unittest.TestCase ):
    def test_full_model( self ):
        '''simple docstring'''
        expected_video = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
        pipe = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe = pipe.to("""cuda""" )
        prompt = """Spiderman is surfing"""
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        video_frames = pipe(prompt , generator=generator , num_inference_steps=25 , output_type="""pt""" ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
    def test_two_step_model( self ):
        '''simple docstring'''
        expected_video = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
        pipe = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        pipe = pipe.to("""cuda""" )
        prompt = """Spiderman is surfing"""
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        video_frames = pipe(prompt , generator=generator , num_inference_steps=2 , output_type="""pt""" ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )
                self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )
        self.keep_order = keep_order
    def _compute_logit( self , hidden , weight , bias , proj ):
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward( self , hidden , labels=None , keep_order=False ):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1 ) )
            labels = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError("Input and labels should have the same size in the batch dimension." )
        else:
            hidden = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            if labels is None:
                out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx , r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0 , indices_i ) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i )
                    hidden_i = hidden.index_select(0 , indices_i )
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out
    def log_prob( self , hidden ):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx , r_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, l_idx:r_idx] = logprob_i
            return out
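# Minimal usage sketch (added; sizes are illustrative):
#     crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500], div_val=2)
#     hidden = torch.randn(8, 64)
#     log_probs = crit.log_prob(hidden)  # shape (8, 1000)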
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method ):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse("""0.17.0""" ):
        return method
    def wrapper(self , *args , **kwargs ):
        if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )
    return wrapper
return wrapper
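# Usage sketch (added): in diffusers this helper is `apply_forward_hook`; it
# decorates methods that must run accelerate's pre-forward hook first (e.g. to
# move offloaded weights back to the right device), roughly:
#     class Encoder(torch.nn.Module):
#         @apply_forward_hook
#         def encode(self, x):
#             ...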
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 10_24,
'''moussaKam/barthez''': 10_24,
'''moussaKam/barthez-orangesum-title''': 10_24,
}
SPIECE_UNDERLINE = '''▁'''
class BarthezTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BarthezTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
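# Usage sketch (added; the checkpoint name matches the map above):
#     tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/barthez')
#     tokenizer('Bonjour le monde')['input_ids']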
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path , checkpoint_path , LORA_PREFIX_UNET , LORA_PREFIX_TEXT_ENCODER , alpha ):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
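# Usage sketch (added; paths are illustrative):
#     pipe = convert("runwayml/stable-diffusion-v1-5", "lora.safetensors",
#                    "lora_unet", "lora_te", alpha=0.75)
#     pipe.to("cuda")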
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest( TestCase ):
    def _create_dummy_dataset( self ) -> Dataset:
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(30).tolist()]})
        return dset
    def test_add_faiss_index( self ):
        import faiss
        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i: {"vecs": i * np.ones(5 , dtype=np.float32)} , with_indices=True , keep_in_memory=True)
        dset = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT)
        scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32))
        self.assertEqual(examples['filename'][0] , 'my_name-train_29')
        dset.drop_index('vecs')
    def test_add_faiss_index_from_external_arrays( self ):
        import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1 , 1) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32))
        self.assertEqual(examples['filename'][0] , 'my_name-train_29')
    def test_serialization( self ):
        import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index('vecs' , tmp_file.name)
            dset.load_faiss_index('vecs2' , tmp_file.name)
        os.unlink(tmp_file.name)
        scores , examples = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.float32))
        self.assertEqual(examples['filename'][0] , 'my_name-train_29')
    def test_drop_index( self ):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1 , 1) , index_name='vecs')
        dset.drop_index('vecs')
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.float32)))
    def test_add_elasticsearch_index( self ):
        from elasticsearch import Elasticsearch
        dset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename' , es_client=es_client)
            scores , examples = dset.get_nearest_examples('filename' , 'my_name-train_29')
            self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class FaissIndexTest( TestCase ):
    def test_flat_ip( self ):
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal , 5)
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal , 10)
        # single query
        query = np.zeros(5 , dtype=np.float32)
        query[1] = 1
        scores , indices = index.search(query)
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1))
        self.assertGreater(scores[0] , 0)
        self.assertEqual(indices[0] , 1)
        # batched queries
        queries = np.eye(5 , dtype=np.float32)[::-1]
        total_scores , total_indices = index.search_batch(queries)
        self.assertRaises(ValueError , index.search_batch , queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores) , 0)
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices)
    def test_factory( self ):
        import faiss
        index = FaissIndex(string_factory='Flat')
        index.add_vectors(np.eye(5 , dtype=np.float32))
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
        index = FaissIndex(string_factory='LSH')
        index.add_vectors(np.eye(5 , dtype=np.float32))
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
        with self.assertRaises(ValueError):
            index = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
    def test_custom( self ):
        import faiss
        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5 , dtype=np.float32))
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
    def test_serialization( self ):
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5 , dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5 , dtype=np.float32)
        query[1] = 1
        scores , indices = index.search(query)
        self.assertGreater(scores[0] , 0)
        self.assertEqual(indices[0] , 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5 , dtype=np.float32))
    index_name = 'index.faiss'
    path = F'''mock://{index_name}'''
    index.save(path , storage_options=mockfs.storage_options)
    index = FaissIndex.load(path , storage_options=mockfs.storage_options)
    query = np.zeros(5 , dtype=np.float32)
    query[1] = 1
    scores , indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest( TestCase ):
    def test_elasticsearch( self ):
        from elasticsearch import Elasticsearch
        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(['foo', 'bar', 'foobar'])
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query)
            self.assertEqual(scores[0] , 1)
            self.assertEqual(indices[0] , 0)
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query , request_timeout=30)
            self.assertEqual(scores[0] , 1)
            self.assertEqual(indices[0] , 0)
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores) , 0)
            self.assertListEqual([1, 1, 1] , best_indices)
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries , request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores) , 0)
            self.assertListEqual([1, 1, 1] , best_indices)
'''simple docstring'''
from collections import Counter
from timeit import timeit
def _lowerCAmelCase ( lowercase = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def _lowerCAmelCase ( lowercase = "" ) -> bool:
if len(lowercase ) == 0:
return True
__lowerCAmelCase = input_str.replace(""" """ , """""" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__lowerCAmelCase = {}
for character in lower_case_input_str:
__lowerCAmelCase = character_freq_dict.get(lowercase , 0 ) + 1
__lowerCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
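# Quick examples (added): "taco cat" has one character with an odd count, so it
# can be rearranged into a palindrome; "abcd" has four and cannot.
#     can_string_be_rearranged_as_palindrome("taco cat")  # True
#     can_string_be_rearranged_as_palindrome("abcd")  # False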
def _lowerCAmelCase ( lowercase = "" ) -> None:
print("""\nFor string = """ , lowercase , """:""" )
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
    check_str = input(
        """Enter string to determine if it can be rearranged as a palindrome or not: """
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name ):
    """simple docstring"""
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1_024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1_536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1_024, 2_048]
        auxiliary_in_channels = 1_024
    # set label information
    num_labels = 150
    repo_id = '''huggingface/label-files'''
    filename = '''ade20k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys(config ):
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_upernet_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub ):
    """simple docstring"""
    model_name_to_url = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''state_dict''']
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace('''bn''' , '''batch_norm''' )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    model.load_state_dict(state_dict )
    # verify on image
    url = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
if model_name == "upernet-convnext-tiny":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__SCREAMING_SNAKE_CASE : int = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowercase_ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
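# Example invocation (a sketch; the script filename and the output path are
# assumptions, adjust them to wherever this file lives in your checkout):
#
#   python convert_upernet_convnext_to_pytorch.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny \
#       --push_to_hub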
| 74 |
"""Convert VideoMAE checkpoints from the original repository: https://github.com/MCG-NJU/VideoMAE"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            # the fused qkv projection is split into separate query/key/value weights
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_a : int = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
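# Example invocation (a sketch; the script filename and paths are assumptions;
# the checkpoint URL must be a direct Google Drive download link as noted above):
#
#   python convert_videomae_to_pytorch.py \
#       --checkpoint_url "<direct download link>" \
#       --pytorch_dump_folder_path ./videomae-base \
#       --model_name videomae-base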
| 689 | 0 |
"""Tokenization classes for CANINE."""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        # Tokenization is simply character splitting.
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        # A token (a single character) maps to its Unicode code point.
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
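# Usage sketch (illustrative; "google/canine-s" is the checkpoint this tokenizer
# is typically paired with). Every character becomes its Unicode code point,
# wrapped in the [CLS]/[SEP] pseudo-characters defined above:
#
#   tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
#   enc = tokenizer("hello")
#   # enc["input_ids"] == [57344, 104, 101, 108, 108, 111, 57345]
#   #                      ^CLS   h    e    l    l    o    ^SEP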
| 75 |
"""Perplexity Metric."""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_a : Tuple = """\
"""
_a : Tuple = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_a : Optional[Any] = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__lowerCAmelCase = """cuda"""
else:
__lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__SCREAMING_SNAKE_CASE ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__lowerCAmelCase = model.config.max_length - 1
else:
__lowerCAmelCase = model.config.max_length
__lowerCAmelCase = tokenizer(
__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,max_length=__SCREAMING_SNAKE_CASE,return_tensors="""pt""",return_attention_mask=__SCREAMING_SNAKE_CASE,).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = encodings["""input_ids"""]
__lowerCAmelCase = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ),1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ),2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__lowerCAmelCase = []
__lowerCAmelCase = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0,len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase = min(start_index + batch_size,len(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = encoded_texts[start_index:end_index]
__lowerCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
__lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch],dim=1 )
__lowerCAmelCase = torch.cat(
[torch.ones(bos_tokens_tensor.size(),dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask],dim=1 )
__lowerCAmelCase = encoded_batch
with torch.no_grad():
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE ).logits
__lowerCAmelCase = out_logits[..., :-1, :].contiguous()
__lowerCAmelCase = labels[..., 1:].contiguous()
__lowerCAmelCase = attn_mask[..., 1:].contiguous()
__lowerCAmelCase = torch.expa(
(loss_fct(shift_logits.transpose(1,2 ),__SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
| 689 | 0 |
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class UpperCAmelCase_ ( snake_case ):
def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ ) -> Union[str, Any]:
if tokenize_kwargs is None:
__lowercase : Union[str, Any] = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
__lowercase : str = truncation
__lowercase : Optional[int] = tokenize_kwargs
__lowercase : List[str] = {}
if return_tensors is not None:
__lowercase : Optional[int] = return_tensors
return preprocess_params, {}, postprocess_params
def _lowerCamelCase ( self , UpperCamelCase_ , **UpperCamelCase_ ) -> Dict[str, GenericTensor]:
__lowercase : List[str] = self.framework
__lowercase : Optional[int] = self.tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
return model_inputs
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Tuple:
__lowercase : Optional[Any] = self.model(**UpperCamelCase_ )
return model_outputs
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=False ) -> List[Any]:
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> int:
return super().__call__(*UpperCamelCase_ , **UpperCamelCase_ )
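# Usage sketch (illustrative; the checkpoint name is an assumption):
#
#   from transformers import pipeline
#   extractor = pipeline(task="feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a simple test.", return_tensors=True)
#   # features has shape (batch, sequence_length, hidden_size), e.g. (1, 8, 768)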
| 76 |
"""Processor class for SAM."""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor
    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        # Pads the 2D points and labels to the maximum number of points in the batch.
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        # Expects a numpy array of length 2 in the final dimension; rescales
        # coordinates from the original image frame to the resized frame.
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords
    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
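# Usage sketch (illustrative; the checkpoint id, image path and point coordinates
# are assumptions). The processor resizes the image, rescales the prompt
# coordinates to the resized frame, and post-processes the predicted masks back
# to the original resolution:
#
#   from PIL import Image
#   from transformers import SamModel, SamProcessor
#
#   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#   model = SamModel.from_pretrained("facebook/sam-vit-base")
#   image = Image.open("dog.jpg")
#   inputs = processor(image, input_points=[[[450, 600]]], return_tensors="pt")
#   outputs = model(**inputs)
#   masks = processor.post_process_masks(
#       outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
#   )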
| 689 | 0 |
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A = logging.get_logger(__name__)
class a__ ( __magic_name__ ):
lowercase_ = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[str]="<pad>" , UpperCamelCase_ : Union[str, Any]=125 , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Optional[Any] , ):
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
__UpperCAmelCase : int = [F"<extra_id_{i}>" for i in range(UpperCamelCase_)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__UpperCAmelCase : Dict = len(set(filter(lambda UpperCamelCase_: bool("extra_id" in str(UpperCamelCase_)) , UpperCamelCase_)))
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
" extra_ids tokens")
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else pad_token
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else eos_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else unk_token
super().__init__(
eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , extra_ids=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , )
__UpperCAmelCase : List[str] = extra_ids
__UpperCAmelCase : int = 2**8 # utf is 8 bits
# define special tokens dict
__UpperCAmelCase : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
__UpperCAmelCase : Any = len(self.special_tokens_encoder)
__UpperCAmelCase : List[Any] = len(UpperCamelCase_)
for i, token in enumerate(UpperCamelCase_):
__UpperCAmelCase : Union[str, Any] = self.vocab_size + i - n
__UpperCAmelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def a_ ( self : List[Any]):
"""simple docstring"""
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def a_ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_)
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(UpperCamelCase_)) + [1]
return ([0] * len(UpperCamelCase_)) + [1] + ([0] * len(UpperCamelCase_)) + [1]
def a_ ( self : Optional[Any] , UpperCamelCase_ : List[int]):
"""simple docstring"""
if len(UpperCamelCase_) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added.")
return token_ids
else:
return token_ids + [self.eos_token_id]
def a_ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None):
"""simple docstring"""
__UpperCAmelCase : Dict = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def a_ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self._add_eos_if_not_present(UpperCamelCase_)
if token_ids_a is None:
return token_ids_a
else:
__UpperCAmelCase : List[Any] = self._add_eos_if_not_present(UpperCamelCase_)
return token_ids_a + token_ids_a
def a_ ( self : List[str] , UpperCamelCase_ : str):
"""simple docstring"""
__UpperCAmelCase : Any = [chr(UpperCamelCase_) for i in text.encode("utf-8")]
return tokens
def a_ ( self : Tuple , UpperCamelCase_ : List[Any]):
"""simple docstring"""
if token in self.special_tokens_encoder:
__UpperCAmelCase : Any = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
__UpperCAmelCase : int = self.added_tokens_encoder[token]
elif len(UpperCamelCase_) != 1:
__UpperCAmelCase : Optional[Any] = self.unk_token_id
else:
__UpperCAmelCase : Any = ord(UpperCamelCase_) + self._num_special_tokens
return token_id
def a_ ( self : Any , UpperCamelCase_ : List[str]):
"""simple docstring"""
if index in self.special_tokens_decoder:
__UpperCAmelCase : Any = self.special_tokens_decoder[index]
else:
__UpperCAmelCase : List[str] = chr(index - self._num_special_tokens)
return token
def a_ ( self : Dict , UpperCamelCase_ : int):
"""simple docstring"""
__UpperCAmelCase : str = b""
for token in tokens:
if token in self.special_tokens_decoder:
__UpperCAmelCase : Tuple = self.special_tokens_decoder[token].encode("utf-8")
elif token in self.added_tokens_decoder:
__UpperCAmelCase : Any = self.special_tokens_decoder[token].encode("utf-8")
elif token in self.special_tokens_encoder:
__UpperCAmelCase : Optional[int] = token.encode("utf-8")
elif token in self.added_tokens_encoder:
__UpperCAmelCase : Optional[Any] = token.encode("utf-8")
else:
__UpperCAmelCase : Any = bytes([ord(UpperCamelCase_)])
bstring += tok_string
__UpperCAmelCase : List[Any] = bstring.decode("utf-8" , errors="ignore")
return string
def a_ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
"""simple docstring"""
return ()
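# Round-trip sketch (illustrative, derived from the class above): each UTF-8 byte
# maps to its value offset by the 3 fixed special tokens (pad=0, eos=1, unk=2),
# and </s> is appended on encode:
#
#   tokenizer = ByT5Tokenizer()
#   enc = tokenizer("hi")
#   # enc["input_ids"] == [107, 108, 1]      # ord("h")+3, ord("i")+3, eos
#   # tokenizer.decode(enc["input_ids"]) == "hi</s>"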
| 77 |
"""Fine-tuning TAPEX (BART-based) models on table-based fact verification tasks such as TabFact."""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            # A TabFact table is '#'-separated cells with one row per line;
            # the first line holds the column headers.
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
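# Example invocation (a sketch; the script name, checkpoint and paths are assumptions):
#
#   python run_tabfact.py \
#       --model_name_or_path microsoft/tapex-base \
#       --do_train --do_eval \
#       --output_dir ./tapex-tabfact \
#       --per_device_train_batch_size 8 \
#       --max_seq_length 1024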
| 689 | 0 |
"""Convert BLIP checkpoints from the original repository (https://github.com/salesforce/BLIP) to the 🤗 Transformers format."""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
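# Worked example (hypothetical key, traced through the substitutions above):
#   rename_key("visual_encoder.blocks.0.attn.proj.weight")
#   -> "vision_model.encoder.layers.0.self_attn.projection.weight"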
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
if config_path is not None:
UpperCAmelCase_ = BlipConfig.from_pretrained(snake_case_ )
else:
UpperCAmelCase_ = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
UpperCAmelCase_ = BlipForConditionalGeneration(snake_case_ ).eval()
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
UpperCAmelCase_ = blip_decoder(pretrained=snake_case_ , image_size=3_84 , vit="base" )
UpperCAmelCase_ = pt_model.eval()
UpperCAmelCase_ = pt_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
hf_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = 3_84
UpperCAmelCase_ = load_demo_image(image_size=snake_case_ , device="cpu" )
UpperCAmelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" )
UpperCAmelCase_ = tokenizer(["a picture of"] ).input_ids
UpperCAmelCase_ = hf_model.generate(snake_case_ , snake_case_ )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
UpperCAmelCase_ = hf_model.generate(snake_case_ )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(snake_case_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
UpperCAmelCase_ = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
UpperCAmelCase_ = blip_vqa(pretrained=snake_case_ , image_size=snake_case_ , vit="base" )
vqa_model.eval()
UpperCAmelCase_ = vqa_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = BlipForQuestionAnswering(snake_case_ )
hf_vqa_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = ["How many dogs are in this image?"]
UpperCAmelCase_ = tokenizer(snake_case_ , return_tensors="pt" ).input_ids
UpperCAmelCase_ = hf_vqa_model.generate(snake_case_ , snake_case_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
UpperCAmelCase_ = blip_itm(pretrained=snake_case_ , image_size=snake_case_ , vit="base" )
itm_model.eval()
UpperCAmelCase_ = itm_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = BlipForImageTextRetrieval(snake_case_ )
UpperCAmelCase_ = ["A picture of a woman with a dog sitting in a beach"]
UpperCAmelCase_ = tokenizer(
snake_case_ , return_tensors="pt" , padding="max_length" , truncation=snake_case_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(snake_case_ )
hf_itm_model.eval()
UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ )
UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
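    # Example invocation (illustrative; the script filename and paths are placeholders):
    #   python convert_blip_checkpoint.py --pytorch_dump_folder_path ./blip-converted --config_path ./config.json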
| 78 |
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, """src""", """diffusers""")
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = find_backend(""" if not is_torch_available():""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__lowerCAmelCase = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__lowerCAmelCase = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers_and_onnx""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn("""torch""",__SCREAMING_SNAKE_CASE )
self.assertIn("""torch_and_transformers""",__SCREAMING_SNAKE_CASE )
self.assertIn("""flax_and_transformers""",__SCREAMING_SNAKE_CASE )
self.assertIn("""torch_and_transformers_and_onnx""",__SCREAMING_SNAKE_CASE )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""",objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""",objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""",objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""",objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""",objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""",objects["""torch_and_transformers_and_onnx"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = create_dummy_object("""CONSTANT""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""\nCONSTANT = None\n""" )
__lowerCAmelCase = create_dummy_object("""function""","""'torch'""" )
self.assertEqual(
__SCREAMING_SNAKE_CASE,"""\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
__lowerCAmelCase = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
__lowerCAmelCase = create_dummy_object("""FakeClass""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
__lowerCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""],__SCREAMING_SNAKE_CASE )
| 689 | 0 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase_ ( unittest.TestCase ):
@property
def __UpperCAmelCase ( self ):
torch.manual_seed(0 )
UpperCAmelCase__ : List[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : int = self.dummy_uncond_unet
UpperCAmelCase__ : Union[str, Any] = ScoreSdeVeScheduler()
UpperCAmelCase__ : Union[str, Any] = ScoreSdeVePipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
sde_ve.to(_lowerCAmelCase )
sde_ve.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase__ : Any = torch.manual_seed(0 )
UpperCAmelCase__ : Union[str, Any] = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=_lowerCAmelCase ).images
UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
UpperCAmelCase__ : Any = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=_lowerCAmelCase , return_dict=_lowerCAmelCase )[
0
]
UpperCAmelCase__ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase__ : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase__ : Optional[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Optional[int] = """google/ncsnpp-church-256"""
UpperCAmelCase__ : str = UNetaDModel.from_pretrained(_lowerCAmelCase )
UpperCAmelCase__ : int = ScoreSdeVeScheduler.from_pretrained(_lowerCAmelCase )
UpperCAmelCase__ : Tuple = ScoreSdeVePipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
sde_ve.to(_lowerCAmelCase )
sde_ve.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase__ : List[str] = torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=_lowerCAmelCase ).images
UpperCAmelCase__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase__ : List[Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 79 |
'''simple docstring'''
def decimal_to_fraction(decimal) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("""Please enter a valid number""" )
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(""".""" )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        # Reduce with the Euclidean algorithm: when the loop ends, `divisor`
        # holds gcd(numerator, denominator).
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator // divisor, denominator // divisor
        return numerator, denominator
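# For comparison, the standard library gives the same reduced pair (illustrative):
#   from fractions import Fraction
#   Fraction("6.25")  # Fraction(25, 4), matching decimal_to_fraction("6.25")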
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
    print(f'{decimal_to_fraction("78td") = }')  # raises ValueError("Please enter a valid number")
| 689 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mega"""] = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
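    # The _LazyModule registration above defers the torch-backed imports: accessing
    # e.g. `MegaModel` triggers the actual import of .modeling_mega on first use.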
| 80 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["""_TRAINERSPEC"""]._serialized_start = 45
    _globals["""_TRAINERSPEC"""]._serialized_end = 1581
    _globals["""_TRAINERSPEC_MODELTYPE"""]._serialized_start = 1517
    _globals["""_TRAINERSPEC_MODELTYPE"""]._serialized_end = 1570
    _globals["""_NORMALIZERSPEC"""]._serialized_start = 1584
    _globals["""_NORMALIZERSPEC"""]._serialized_end = 1793
    _globals["""_SELFTESTDATA"""]._serialized_start = 1795
    _globals["""_SELFTESTDATA"""]._serialized_end = 1916
    _globals["""_SELFTESTDATA_SAMPLE"""]._serialized_start = 1864
    _globals["""_SELFTESTDATA_SAMPLE"""]._serialized_end = 1905
    _globals["""_MODELPROTO"""]._serialized_start = 1919
    _globals["""_MODELPROTO"""]._serialized_end = 2429
    _globals["""_MODELPROTO_SENTENCEPIECE"""]._serialized_start = 2208
    _globals["""_MODELPROTO_SENTENCEPIECE"""]._serialized_end = 2418
    _globals["""_MODELPROTO_SENTENCEPIECE_TYPE"""]._serialized_start = 2323
    _globals["""_MODELPROTO_SENTENCEPIECE_TYPE"""]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 689 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : str = "deformable_detr"
__UpperCAmelCase : List[str] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Optional[int] , lowerCamelCase : int=True , lowerCamelCase : Dict=None , lowerCamelCase : Optional[Any]=3 , lowerCamelCase : str=300 , lowerCamelCase : Tuple=1024 , lowerCamelCase : Dict=6 , lowerCamelCase : Any=1024 , lowerCamelCase : List[Any]=8 , lowerCamelCase : Any=6 , lowerCamelCase : Dict=1024 , lowerCamelCase : Dict=8 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : List[str]=True , lowerCamelCase : List[str]="relu" , lowerCamelCase : Optional[Any]=256 , lowerCamelCase : Tuple=0.1 , lowerCamelCase : List[Any]=0.0 , lowerCamelCase : Dict=0.0 , lowerCamelCase : List[str]=0.02 , lowerCamelCase : Optional[Any]=1.0 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : Any=False , lowerCamelCase : int="sine" , lowerCamelCase : Union[str, Any]="resnet50" , lowerCamelCase : int=True , lowerCamelCase : List[str]=False , lowerCamelCase : int=4 , lowerCamelCase : str=4 , lowerCamelCase : List[str]=4 , lowerCamelCase : Any=False , lowerCamelCase : Any=300 , lowerCamelCase : Dict=False , lowerCamelCase : List[Any]=1 , lowerCamelCase : Optional[int]=5 , lowerCamelCase : Any=2 , lowerCamelCase : Union[str, Any]=1 , lowerCamelCase : Optional[int]=1 , lowerCamelCase : Optional[Any]=5 , lowerCamelCase : List[str]=2 , lowerCamelCase : int=0.1 , lowerCamelCase : List[str]=0.25 , lowerCamelCase : Optional[int]=False , **lowerCamelCase : int , ) -> Dict:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__snake_case : Optional[Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : Optional[Any] = backbone_config.get("model_type" )
__snake_case : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
__snake_case : str = config_class.from_dict(lowerCamelCase )
__snake_case : Union[str, Any] = use_timm_backbone
__snake_case : List[str] = backbone_config
__snake_case : Optional[int] = num_channels
__snake_case : List[Any] = num_queries
__snake_case : Tuple = max_position_embeddings
__snake_case : Tuple = d_model
__snake_case : List[str] = encoder_ffn_dim
__snake_case : Tuple = encoder_layers
__snake_case : Tuple = encoder_attention_heads
__snake_case : Dict = decoder_ffn_dim
__snake_case : int = decoder_layers
__snake_case : Optional[int] = decoder_attention_heads
__snake_case : Any = dropout
__snake_case : List[Any] = attention_dropout
__snake_case : List[str] = activation_dropout
__snake_case : str = activation_function
__snake_case : Optional[int] = init_std
__snake_case : Any = init_xavier_std
__snake_case : Optional[Any] = encoder_layerdrop
__snake_case : Union[str, Any] = auxiliary_loss
__snake_case : List[Any] = position_embedding_type
__snake_case : List[str] = backbone
__snake_case : Tuple = use_pretrained_backbone
__snake_case : Dict = dilation
# deformable attributes
__snake_case : Any = num_feature_levels
__snake_case : List[Any] = encoder_n_points
__snake_case : List[Any] = decoder_n_points
__snake_case : int = two_stage
__snake_case : Any = two_stage_num_proposals
__snake_case : List[Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
__snake_case : str = class_cost
__snake_case : Union[str, Any] = bbox_cost
__snake_case : str = giou_cost
# Loss coefficients
__snake_case : int = mask_loss_coefficient
__snake_case : List[str] = dice_loss_coefficient
__snake_case : Any = bbox_loss_coefficient
__snake_case : List[Any] = giou_loss_coefficient
__snake_case : Optional[Any] = eos_coefficient
__snake_case : Optional[Any] = focal_alpha
__snake_case : int = disable_custom_kernels
super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase )
@property
def __snake_case ( self : Optional[int] ) -> int:
return self.encoder_attention_heads
@property
def __snake_case ( self : Dict ) -> int:
return self.d_model
def __snake_case ( self : Tuple ) -> Optional[int]:
__snake_case : Union[str, Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__snake_case : Union[str, Any] = self.backbone_config.to_dict()
__snake_case : Optional[Any] = self.__class__.model_type
return output
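# Minimal usage sketch (illustrative; keyword names follow the upstream signature that
# the obfuscated parameter list above stands in for):
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   config.to_dict()  # round-trips backbone_config via the to_dict() defined above
# Note: two_stage=True with with_box_refine=False raises a ValueError (see __init__).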
| 81 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : torch.FloatTensor
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE=True,):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = torch.nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[0],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
# down
__lowerCAmelCase = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_down_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,add_downsample=not is_final_block,resnet_eps=1e-6,downsample_padding=0,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""",attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# out
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = 2 * out_channels if double_z else out_channels
__lowerCAmelCase = nn.Convad(block_out_channels[-1],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = x
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""","""1.11.0""" ):
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
__lowerCAmelCase = down_block(__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
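# (torch.utils.checkpoint above trades compute for memory: activations inside each
#  checkpointed block are recomputed in the backward pass instead of being stored.)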
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE="group",):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[-1],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
__lowerCAmelCase = in_channels if norm_type == """spatial""" else None
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type,attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# up
__lowerCAmelCase = list(reversed(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = reversed_block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_up_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block + 1,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,prev_output_channel=__SCREAMING_SNAKE_CASE,add_upsample=not is_final_block,resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,resnet_time_scale_shift=__SCREAMING_SNAKE_CASE,)
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = output_channel
# out
if norm_type == "spatial":
__lowerCAmelCase = SpatialNorm(block_out_channels[0],__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = nn.Convad(block_out_channels[0],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
__lowerCAmelCase = z
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""","""1.11.0""" ):
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = up_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="random",__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = n_e
__lowerCAmelCase = vq_embed_dim
__lowerCAmelCase = beta
__lowerCAmelCase = legacy
__lowerCAmelCase = nn.Embedding(self.n_e,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e,1.0 / self.n_e )
__lowerCAmelCase = remap
if self.remap is not None:
self.register_buffer("""used""",torch.tensor(np.load(self.remap ) ) )
__lowerCAmelCase = self.used.shape[0]
__lowerCAmelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowerCAmelCase = self.re_embed
__lowerCAmelCase = self.re_embed + 1
print(
f'Remapping {self.n_e} indices to {self.re_embed} indices. '
f'Using {self.unknown_index} for unknown indices.' )
else:
__lowerCAmelCase = n_e
__lowerCAmelCase = sane_index_shape
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = (inds[:, :, None] == used[None, None, ...]).long()
__lowerCAmelCase = match.argmax(-1 )
__lowerCAmelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
__lowerCAmelCase = torch.randint(0,self.re_embed,size=new[unknown].shape ).to(device=new.device )
else:
__lowerCAmelCase = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
__lowerCAmelCase = 0 # simply set to zero
__lowerCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :],1,__SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = z.permute(0,2,3,1 ).contiguous()
__lowerCAmelCase = z.view(-1,self.vq_embed_dim )
        # distances from z to embeddings e_j: ||z - e||^2 = ||z||^2 + ||e||^2 - 2 * e . z
__lowerCAmelCase = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE,self.embedding.weight ),dim=1 )
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
__lowerCAmelCase = None
__lowerCAmelCase = None
# compute loss for embedding
if not self.legacy:
__lowerCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__lowerCAmelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__lowerCAmelCase = z + (z_q - z).detach()
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
if self.remap is not None:
__lowerCAmelCase = min_encoding_indices.reshape(z.shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.remap_to_used(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = min_encoding_indices.reshape(-1,1 ) # flatten
if self.sane_index_shape:
__lowerCAmelCase = min_encoding_indices.reshape(z_q.shape[0],z_q.shape[2],z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if self.remap is not None:
__lowerCAmelCase = indices.reshape(shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
__lowerCAmelCase = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
return z_q
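# Straight-through estimator used in `forward` above, in isolation (illustrative):
#   z_q = z + (z_q - z).detach()
# The forward value equals z_q, but the gradient of z_q w.r.t. z is the identity,
# so backprop skips the non-differentiable argmin lookup and reaches the encoder.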
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = parameters
__lowerCAmelCase , __lowerCAmelCase = torch.chunk(__SCREAMING_SNAKE_CASE,2,dim=1 )
__lowerCAmelCase = torch.clamp(self.logvar,-30.0,20.0 )
__lowerCAmelCase = deterministic
__lowerCAmelCase = torch.exp(0.5 * self.logvar )
__lowerCAmelCase = torch.exp(self.logvar )
if self.deterministic:
__lowerCAmelCase = __lowerCAmelCase = torch.zeros_like(
self.mean,device=self.parameters.device,dtype=self.parameters.dtype )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
__lowerCAmelCase = randn_tensor(
self.mean.shape,generator=__SCREAMING_SNAKE_CASE,device=self.parameters.device,dtype=self.parameters.dtype )
__lowerCAmelCase = self.mean + self.std * sample
return x
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
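        # With `other is None` this is KL( N(mean, var) || N(0, I) )
        # = 0.5 * sum( mean^2 + var - 1 - logvar ); otherwise the general
        # diagonal-Gaussian KL in the second branch is used.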
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean,2 ) + self.var - 1.0 - self.logvar,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar,dim=[1, 2, 3],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=[1, 2, 3] ):
'''simple docstring'''
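        # Negative log-likelihood of `sample` under N(mean, var):
        # 0.5 * sum( log(2*pi) + logvar + (sample - mean)^2 / var ) over `dims`.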
if self.deterministic:
return torch.Tensor([0.0] )
__lowerCAmelCase = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean,2 ) / self.var,dim=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.mean
| 689 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , "tf_padding" ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , "depth_multiplier" ) )
class lowercase__ :
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str=13 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : List[Any]=32 , _UpperCAmelCase : Any=0.25 , _UpperCAmelCase : Optional[int]=8 , _UpperCAmelCase : Union[str, Any]=8 , _UpperCAmelCase : Tuple=6 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : int="relu6" , _UpperCAmelCase : Optional[int]=1280 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Optional[int]=10 , _UpperCAmelCase : Optional[Any]=None , ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = depth_multiplier
UpperCAmelCase_ = depth_divisible_by
UpperCAmelCase_ = min_depth
UpperCAmelCase_ = expand_ratio
UpperCAmelCase_ = tf_padding
UpperCAmelCase_ = output_stride
UpperCAmelCase_ = first_layer_is_expansion
UpperCAmelCase_ = finegrained_output
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCAmelCase_ = classifier_dropout_prob
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase__ ( self : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = MobileNetVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowercase__ ( self : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MobileNetVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MobileNetVaForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = MobileNetVaModelTester(self )
UpperCAmelCase_ = MobileNetVaConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV2 does not use inputs_embeds" )
def lowercase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV2 does not support input and output embeddings" )
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV2 does not output attentions" )
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
pass
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ):
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = 16
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = MobileNetVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224" ) if is_vision_available() else None
)
@slow
def lowercase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224" ).to(_UpperCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor([0.2445, -1.1993, 0.1905] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
UpperCAmelCase_ = model.to(_UpperCAmelCase )
UpperCAmelCase_ = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
UpperCAmelCase_ = outputs.logits
# verify the logits
UpperCAmelCase_ = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
| 82 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None , metadata={"""help""": """Model type selected in the list: """ + """, """.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
    max_seq_length: int = field(
        default=1_28 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    doc_stride: int = field(
        default=1_28 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
    max_query_length: int = field(
        default=64 , metadata={
            """help""": (
                """The maximum number of tokens for the question. Questions longer than this will """
                """be truncated to this length."""
            )
        } , )
    max_answer_length: int = field(
        default=30 , metadata={
            """help""": (
                """The maximum length of an answer that can be generated. This is needed because the start """
                """and end predictions are not conditioned on one another."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    version_2_with_negative: bool = field(
        default=False , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    n_best_size: int = field(
        default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    lang_id: int = field(
        default=0 , metadata={
            """help""": (
                """language id of input for language-specific xlm models (see"""
                """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
            )
        } , )
    threads: int = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class Split(Enum):
    train = """train"""
    dev = """dev"""
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = Split.train,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = "pt",):
'''simple docstring'''
__lowerCAmelCase = args
__lowerCAmelCase = is_language_sensitive
        __lowerCAmelCase = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
try:
__lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
__lowerCAmelCase = mode
# Load data features from cache or dataset file
__lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1"""
__lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCAmelCase = cached_features_file + """.lock"""
with FileLock(__SCREAMING_SNAKE_CASE ):
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache:
__lowerCAmelCase = time.time()
__lowerCAmelCase = torch.load(__SCREAMING_SNAKE_CASE )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__lowerCAmelCase = self.old_features["""features"""]
__lowerCAmelCase = self.old_features.get("""dataset""",__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.old_features.get("""examples""",__SCREAMING_SNAKE_CASE )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
""" future run""" )
else:
if mode == Split.dev:
__lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
else:
__lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
__lowerCAmelCase , __lowerCAmelCase = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__SCREAMING_SNAKE_CASE,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples},__SCREAMING_SNAKE_CASE,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.features[i]
__lowerCAmelCase = torch.tensor(feature.input_ids,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.attention_mask,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.token_type_ids,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.cls_index,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.p_mask,dtype=torch.float )
__lowerCAmelCase = torch.tensor(feature.is_impossible,dtype=torch.float )
__lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
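        # some architectures (e.g. RoBERTa, DistilBERT) do not accept token type ids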
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
__lowerCAmelCase = torch.tensor(feature.start_position,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __snake_case ( _lowercase , _lowercase , _lowercase):
@register_to_config
def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : float , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : bool = False , ):
"""simple docstring"""
super().__init__()
_lowerCamelCase : str = nn.Embedding(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Dict = nn.Embedding(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : str = False
_lowerCamelCase : List[Any] = nn.Dropout(p=__lowerCAmelCase )
_lowerCamelCase : Tuple = TaConfig(
vocab_size=__lowerCAmelCase , d_model=__lowerCAmelCase , num_heads=__lowerCAmelCase , d_kv=__lowerCAmelCase , d_ff=__lowerCAmelCase , dropout_rate=__lowerCAmelCase , feed_forward_proj=__lowerCAmelCase , is_decoder=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , )
_lowerCamelCase : Union[str, Any] = nn.ModuleList()
for lyr_num in range(__lowerCAmelCase ):
_lowerCamelCase : str = TaBlock(__lowerCAmelCase )
self.encoders.append(__lowerCAmelCase )
_lowerCamelCase : List[Any] = TaLayerNorm(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = nn.Dropout(p=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : int = self.token_embedder(__lowerCAmelCase )
_lowerCamelCase : List[Any] = encoder_input_tokens.shape[1]
_lowerCamelCase : int = torch.arange(__lowerCAmelCase , device=encoder_input_tokens.device )
x += self.position_encoding(__lowerCAmelCase )
_lowerCamelCase : Any = self.dropout_pre(__lowerCAmelCase )
# inverted the attention mask
_lowerCamelCase : List[str] = encoder_input_tokens.size()
_lowerCamelCase : Optional[int] = self.get_extended_attention_mask(__lowerCAmelCase , __lowerCAmelCase )
for lyr in self.encoders:
_lowerCamelCase : Union[str, Any] = lyr(__lowerCAmelCase , __lowerCAmelCase )[0]
_lowerCamelCase : Any = self.layer_norm(__lowerCAmelCase )
return self.dropout_post(__lowerCAmelCase ), encoder_inputs_mask
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
# vision encoder
if "img_encoder.pos_embed" in name:
__lowerCAmelCase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
__lowerCAmelCase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
__lowerCAmelCase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
__lowerCAmelCase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
__lowerCAmelCase = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
__lowerCAmelCase = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
__lowerCAmelCase = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
__lowerCAmelCase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
__lowerCAmelCase = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
__lowerCAmelCase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
__lowerCAmelCase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
__lowerCAmelCase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
__lowerCAmelCase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
__lowerCAmelCase = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
__lowerCAmelCase = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
__lowerCAmelCase = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
__lowerCAmelCase = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
__lowerCAmelCase = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
__lowerCAmelCase = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
__lowerCAmelCase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
__lowerCAmelCase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
__lowerCAmelCase = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
__lowerCAmelCase = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> Dict:
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(lowercase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
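            # e.g. with vision hidden size d, the fused qkv weight has shape (3d, d);
            # rows [0:d] are the query, [d:2d] the key and [2d:3d] the value projection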
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase , __lowerCAmelCase = int(key_split[2] ), int(key_split[4] )
__lowerCAmelCase = config.vision_config.hidden_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[dim : dim * 2]
__lowerCAmelCase = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase = int(key_split[3] )
__lowerCAmelCase = config.text_config.hidden_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[
dim : dim * 2, :
]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[dim : dim * 2]
__lowerCAmelCase = val[-dim:]
else:
__lowerCAmelCase = rename_key(lowercase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
__lowerCAmelCase = val.squeeze_()
else:
__lowerCAmelCase = val
return orig_state_dict
def _lowerCAmelCase ( ) -> str:
__lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCAmelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase , lowercase="groupvit-gcc-yfcc" , lowercase=False ) -> List[Any]:
__lowerCAmelCase = GroupViTConfig()
__lowerCAmelCase = GroupViTModel(lowercase ).eval()
__lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )["""model"""]
__lowerCAmelCase = convert_state_dict(lowercase , lowercase )
__lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowercase , strict=lowercase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase ) == 0)
# verify result
__lowerCAmelCase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowercase , padding=lowercase , return_tensors="""pt""" )
with torch.no_grad():
__lowerCAmelCase = model(**lowercase )
if model_name == "groupvit-gcc-yfcc":
__lowerCAmelCase = torch.tensor([[13.35_23, 6.36_29]] )
elif model_name == "groupvit-gcc-redcaps":
__lowerCAmelCase = torch.tensor([[16.18_73, 8.62_30]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , lowercase , atol=1e-3 )
processor.save_pretrained(lowercase )
model.save_pretrained(lowercase )
print("""Successfully saved processor and model to""" , lowercase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowercase , organization="""nielsr""" )
model.push_to_hub(lowercase , organization="""nielsr""" )
if __name__ == "__main__":
_a : int = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_a : List[str] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']}
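# Each framework's model class is added to the import structure only when its
# backend (torch / TensorFlow / Flax) is installed, so importing the package
# stays cheap and never requires all three frameworks at once.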
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''EncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''TFEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''FlaxEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_a : Tuple = logging.get_logger(__name__)
_a : Optional[int] = ["""model.decoder.embed_positions.weights"""]
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
if "emb" in name:
__lowerCAmelCase = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
__lowerCAmelCase = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
__lowerCAmelCase = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
__lowerCAmelCase = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
__lowerCAmelCase = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
__lowerCAmelCase = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
__lowerCAmelCase = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
__lowerCAmelCase = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
__lowerCAmelCase = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
__lowerCAmelCase = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple[Dict, Dict]:
__lowerCAmelCase = list(state_dict.keys() )
__lowerCAmelCase = {}
for key in keys:
__lowerCAmelCase = state_dict.pop(lowercase )
__lowerCAmelCase = rename_keys(lowercase )
if "in_proj_weight" in key:
# split fused qkv proj
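            # the fused projection stacks q, k and v along dim 0, i.e. shape
            # (3 * hidden_size, hidden_size); slice it into three square matrices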
__lowerCAmelCase = val[:hidden_size, :]
__lowerCAmelCase = val[hidden_size : 2 * hidden_size, :]
__lowerCAmelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__lowerCAmelCase = val
else:
__lowerCAmelCase = val
return state_dict, enc_dec_proj_state_dict
def _lowerCAmelCase ( lowercase ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
__lowerCAmelCase = 1024
__lowerCAmelCase = 24
__lowerCAmelCase = 16
elif checkpoint == "medium":
__lowerCAmelCase = 1536
__lowerCAmelCase = 48
__lowerCAmelCase = 24
elif checkpoint == "large":
__lowerCAmelCase = 2048
__lowerCAmelCase = 48
__lowerCAmelCase = 32
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
__lowerCAmelCase = MusicgenDecoderConfig(
hidden_size=lowercase , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase , num_attention_heads=lowercase , )
return config
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase=None , lowercase=None , lowercase="cpu" ) -> Optional[Any]:
__lowerCAmelCase = MusicGen.get_pretrained(lowercase , device=lowercase )
__lowerCAmelCase = decoder_config_from_checkpoint(lowercase )
__lowerCAmelCase = fairseq_model.lm.state_dict()
__lowerCAmelCase , __lowerCAmelCase = rename_state_dict(
lowercase , hidden_size=decoder_config.hidden_size )
__lowerCAmelCase = TaEncoderModel.from_pretrained("""t5-base""" )
__lowerCAmelCase = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
__lowerCAmelCase = MusicgenForCausalLM(lowercase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__lowerCAmelCase , __lowerCAmelCase = decoder.load_state_dict(lowercase , strict=lowercase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase )
if len(lowercase ) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
if len(lowercase ) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
__lowerCAmelCase = MusicgenForConditionalGeneration(text_encoder=lowercase , audio_encoder=lowercase , decoder=lowercase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase )
# check we can do a forward pass
__lowerCAmelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__lowerCAmelCase = input_ids.reshape(2 * 4 , -1 )
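    # MusicGen predicts 4 codebooks in parallel, so decoder inputs are flattened
    # from (batch, num_codebooks, seq) to (batch * num_codebooks, seq); the
    # (8, 1, 2048) logits checked below are 2 samples x 4 codebooks over 2048 audio tokens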
with torch.no_grad():
__lowerCAmelCase = model(input_ids=lowercase , decoder_input_ids=lowercase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
__lowerCAmelCase = AutoTokenizer.from_pretrained("""t5-base""" )
__lowerCAmelCase = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
__lowerCAmelCase = MusicgenProcessor(feature_extractor=lowercase , tokenizer=lowercase )
# set the appropriate bos/pad token ids
__lowerCAmelCase = 2048
__lowerCAmelCase = 2048
# set other default generation config params
__lowerCAmelCase = int(30 * audio_encoder.config.frame_rate )
__lowerCAmelCase = True
__lowerCAmelCase = 3.0
if pytorch_dump_folder is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(lowercase )
processor.save_pretrained(lowercase )
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(lowercase )
processor.push_to_hub(lowercase )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
_a : List[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class snake_case :
lowercase_ = BlenderbotSmallConfig
lowercase_ = {}
lowercase_ = 'gelu'
def __init__( self : List[Any] , a_ : int , a_ : Any=13 , a_ : List[str]=7 , a_ : Optional[int]=True , a_ : Tuple=False , a_ : Optional[int]=99 , a_ : Tuple=32 , a_ : Union[str, Any]=2 , a_ : Union[str, Any]=4 , a_ : str=37 , a_ : List[Any]=0.1 , a_ : int=0.1 , a_ : List[Any]=20 , a_ : Optional[Any]=2 , a_ : List[str]=1 , a_ : Tuple=0 , )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : Tuple = use_labels
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = eos_token_id
SCREAMING_SNAKE_CASE__ : Any = pad_token_id
SCREAMING_SNAKE_CASE__ : Optional[int] = bos_token_id
def __lowercase( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_blenderbot_small_inputs_dict(a_ , a_ , a_ )
return config, inputs_dict
def __lowercase( self : Dict , a_ : Dict , a_ : str )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFBlenderbotSmallModel(config=a_ ).get_decoder()
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict['input_ids']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids[:1, :]
SCREAMING_SNAKE_CASE__ : List[str] = inputs_dict['attention_mask'][:1, :]
SCREAMING_SNAKE_CASE__ : Any = inputs_dict['head_mask']
SCREAMING_SNAKE_CASE__ : str = 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ , attention_mask=a_ , head_mask=a_ , use_cache=a_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        SCREAMING_SNAKE_CASE__ : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE__ : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_ , attention_mask=a_ )[0]
SCREAMING_SNAKE_CASE__ : int = model(a_ , attention_mask=a_ , past_key_values=a_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__ : Any = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(a_ , a_ , rtol=1e-3 )
def _a ( lowercase__ : Any , lowercase__ : Any , lowercase__ : List[str] , lowercase__ : Any=None , lowercase__ : str=None , lowercase__ : int=None , lowercase__ : str=None , lowercase__ : Tuple=None , ):
'''simple docstring'''
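    # default masks attend to every non-pad token; the decoder mask always keeps
    # position 0 visible because it holds the decoder start token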
if attention_mask is None:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : List[Any] = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
lowercase_ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
lowercase_ = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase_ = True
lowercase_ = False
lowercase_ = False
def __lowercase( self : Any )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFBlenderbotSmallModelTester(self )
SCREAMING_SNAKE_CASE__ : Dict = ConfigTester(self , config_class=a_ )
def __lowercase( self : Optional[Any] )-> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase( self : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*a_ )
@require_tokenizers
@require_tf
class snake_case ( unittest.TestCase ):
lowercase_ = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
lowercase_ = 'facebook/blenderbot_small-90M'
@cached_property
def __lowercase( self : int )-> Any:
"""simple docstring"""
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
@cached_property
def __lowercase( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __lowercase( self : str )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.tokenizer(self.src_text , return_tensors='tf' )
SCREAMING_SNAKE_CASE__ : int = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=a_ , )
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=a_ )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
'''simple docstring'''
from collections import deque
def _lowerCAmelCase ( lowercase ) -> Dict:
__lowerCAmelCase = len(lowercase )
__lowerCAmelCase = deque()
__lowerCAmelCase = [False for _ in range(lowercase )]
__lowerCAmelCase = [-1 for _ in range(lowercase )]
__lowerCAmelCase = index_of[:]
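    # Tarjan's algorithm: a DFS assigns each vertex a discovery index and a
    # low-link value (the smallest index reachable from its DFS subtree). A vertex
    # whose low-link equals its own index is the root of a strongly connected
    # component, which is then popped off the stack.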
def strong_connect(lowercase , lowercase , lowercase ):
__lowerCAmelCase = index # the number when this node is seen
__lowerCAmelCase = index # lowest rank node reachable from here
index += 1
stack.append(lowercase )
__lowerCAmelCase = True
for w in g[v]:
if index_of[w] == -1:
__lowerCAmelCase = strong_connect(lowercase , lowercase , lowercase )
__lowerCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__lowerCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
__lowerCAmelCase = []
__lowerCAmelCase = stack.pop()
__lowerCAmelCase = False
component.append(lowercase )
while w != v:
__lowerCAmelCase = stack.pop()
__lowerCAmelCase = False
component.append(lowercase )
components.append(lowercase )
return index
__lowerCAmelCase = []
for v in range(lowercase ):
if index_of[v] == -1:
strong_connect(lowercase , 0 , lowercase )
return components
def _lowerCAmelCase ( lowercase , lowercase ) -> str:
__lowerCAmelCase = [[] for _ in range(lowercase )]
for u, v in edges:
g[u].append(lowercase )
return g
if __name__ == "__main__":
# Test
_a : Any = 7
_a : Tuple = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_a : Optional[int] = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_a : Optional[Any] = [(u, v) for u, v in zip(source, target)]
_a : Optional[int] = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
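# Pillow 9.1 moved the resampling filters into the PIL.Image.Resampling enum,
# so select the constants according to the installed version.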
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
__a :Optional[Any] = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
__a :List[Any] = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ = (images / 2 + 0.5).clamp(0 ,1 )
A_ = images.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
A_ = numpy_to_pil(__UpperCamelCase )
return images
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if images.ndim == 3:
A_ = images[None, ...]
A_ = (images * 255).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
A_ = [Image.fromarray(image.squeeze() ,mode="L" ) for image in images]
else:
A_ = [Image.fromarray(__UpperCamelCase ) for image in images]
    return pil_images
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ) -> Union[str, Any]:
__lowerCAmelCase = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
__lowerCAmelCase = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(lowercase )
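    # usage: `diffusers-cli env` prints environment and version information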
# Let's go
__lowerCAmelCase = parser.parse_args()
if not hasattr(lowercase , """func""" ):
parser.print_help()
exit(1 )
# Run
__lowerCAmelCase = args.func(lowercase )
service.run()
if __name__ == "__main__":
main()
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = (PNDMScheduler,)
UpperCAmelCase__ = (('''num_inference_steps''', 50),)
def SCREAMING_SNAKE_CASE ( self : Dict , **UpperCAmelCase__ : Optional[int]) ->Optional[Any]:
'''simple docstring'''
A__ = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**UpperCAmelCase__)
return config
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : int=0 , **UpperCAmelCase__ : str) ->Tuple:
'''simple docstring'''
A__ = dict(self.forward_default_kwargs)
A__ = kwargs.pop('''num_inference_steps''' , UpperCAmelCase__)
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config(**UpperCAmelCase__)
A__ = scheduler_class(**UpperCAmelCase__)
scheduler.set_timesteps(UpperCAmelCase__)
# copy over dummy past residuals
A__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__)
A__ = scheduler_class.from_pretrained(UpperCAmelCase__)
new_scheduler.set_timesteps(UpperCAmelCase__)
# copy over dummy past residuals
A__ = dummy_past_residuals[:]
A__ = scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__).prev_sample
A__ = new_scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
A__ = scheduler.step_plms(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__).prev_sample
A__ = new_scheduler.step_plms(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Union[str, Any]=0 , **UpperCAmelCase__ : Union[str, Any]) ->List[str]:
'''simple docstring'''
A__ = dict(self.forward_default_kwargs)
A__ = kwargs.pop('''num_inference_steps''' , UpperCAmelCase__)
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__)
scheduler.set_timesteps(UpperCAmelCase__)
# copy over dummy past residuals (must be after setting timesteps)
A__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__)
A__ = scheduler_class.from_pretrained(UpperCAmelCase__)
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase__)
# copy over dummy past residual (must be after setting timesteps)
A__ = dummy_past_residuals[:]
A__ = scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__).prev_sample
A__ = new_scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
A__ = scheduler.step_plms(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__).prev_sample
A__ = new_scheduler.step_plms(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self : Any , **UpperCAmelCase__ : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**UpperCAmelCase__)
A__ = scheduler_class(**UpperCAmelCase__)
A__ = 10
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase__)
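        # PNDM first runs Runge-Kutta (PRK) warm-up steps, then switches to the
        # cheaper linear multistep (PLMS) updates for the remaining timesteps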
for i, t in enumerate(scheduler.prk_timesteps):
A__ = model(UpperCAmelCase__ , UpperCAmelCase__)
A__ = scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
A__ = model(UpperCAmelCase__ , UpperCAmelCase__)
A__ = scheduler.step_plms(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__).prev_sample
return sample
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str:
'''simple docstring'''
A__ = dict(self.forward_default_kwargs)
A__ = kwargs.pop('''num_inference_steps''' , UpperCAmelCase__)
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__)
A__ = self.dummy_sample
A__ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase__ , '''set_timesteps'''):
scheduler.set_timesteps(UpperCAmelCase__)
elif num_inference_steps is not None and not hasattr(UpperCAmelCase__ , '''set_timesteps'''):
A__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A__ = dummy_past_residuals[:]
A__ = scheduler.step_prk(UpperCAmelCase__ , 0 , UpperCAmelCase__ , **UpperCAmelCase__).prev_sample
A__ = scheduler.step_prk(UpperCAmelCase__ , 1 , UpperCAmelCase__ , **UpperCAmelCase__).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
A__ = scheduler.step_plms(UpperCAmelCase__ , 0 , UpperCAmelCase__ , **UpperCAmelCase__).prev_sample
A__ = scheduler.step_plms(UpperCAmelCase__ , 1 , UpperCAmelCase__ , **UpperCAmelCase__).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCAmelCase__)
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(steps_offset=1)
A__ = scheduler_class(**UpperCAmelCase__)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->int:
'''simple docstring'''
A__ = 27
for scheduler_class in self.scheduler_classes:
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__)
scheduler.set_timesteps(UpperCAmelCase__)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
A__ = scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__).prev_sample
def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase__):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[Any]:
'''simple docstring'''
A__ = self.full_loop()
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Any) ->Any:
'''simple docstring'''
A__ = self.full_loop(prediction_type='''v_prediction''')
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
'''simple docstring'''
        A__ = self.full_loop(set_alpha_to_one=True , beta_start=0.01)
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
        A__ = self.full_loop(set_alpha_to_one=False , beta_start=0.01)
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)
_a : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
_a : Any = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
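    # resolve the dotted key (e.g. "encoder.layers.0.attention.k_proj") one
    # attribute at a time until the target parameter on the HF model is reached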
for attribute in key.split(""".""" ):
__lowerCAmelCase = getattr(lowercase , lowercase )
if weight_type is not None:
__lowerCAmelCase = getattr(lowercase , lowercase ).shape
else:
__lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
__lowerCAmelCase = value
elif weight_type == "weight_g":
__lowerCAmelCase = value
elif weight_type == "weight_v":
__lowerCAmelCase = value
elif weight_type == "bias":
__lowerCAmelCase = value
else:
__lowerCAmelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]:
__lowerCAmelCase = []
__lowerCAmelCase = fairseq_model.state_dict()
__lowerCAmelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCAmelCase = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key):
# special case since naming is very similar
continue
__lowerCAmelCase = True
if "*" in mapped_key:
__lowerCAmelCase = name.split(lowercase )[0].split(""".""" )[-2]
__lowerCAmelCase = mapped_key.replace("""*""" , lowercase )
if "weight_g" in name:
__lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
__lowerCAmelCase = """weight_v"""
elif "bias" in name:
__lowerCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase = """weight"""
else:
__lowerCAmelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'Unused weights: {unused_weights}' )
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
__lowerCAmelCase = name.split(""".""" )
__lowerCAmelCase = int(items[0] )
__lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Dict:
if config_path is not None:
__lowerCAmelCase = UniSpeechSatConfig.from_pretrained(lowercase )
else:
__lowerCAmelCase = UniSpeechSatConfig()
__lowerCAmelCase = """"""
if is_finetuned:
__lowerCAmelCase = UniSpeechSatForCTC(lowercase )
else:
__lowerCAmelCase = UniSpeechSatForPreTraining(lowercase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
__lowerCAmelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_a : List[str] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_a : Union[str, Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class lowercase__ ( A_ ):
__UpperCAmelCase = '''markuplm'''
def __init__( self , SCREAMING_SNAKE_CASE=3_0522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-1_2 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=1024 , SCREAMING_SNAKE_CASE=216 , SCREAMING_SNAKE_CASE=1001 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> int:
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_lowerCamelCase : str = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : List[Any] = num_hidden_layers
_lowerCamelCase : Any = num_attention_heads
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : Optional[int] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Optional[int] = type_vocab_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : str = position_embedding_type
_lowerCamelCase : List[Any] = use_cache
_lowerCamelCase : Tuple = classifier_dropout
# additional properties
_lowerCamelCase : Tuple = max_depth
_lowerCamelCase : List[Any] = max_xpath_tag_unit_embeddings
_lowerCamelCase : Optional[int] = max_xpath_subs_unit_embeddings
_lowerCamelCase : Tuple = tag_pad_id
_lowerCamelCase : int = subs_pad_id
_lowerCamelCase : Optional[int] = xpath_unit_hidden_size
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_a : str = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_a : Dict = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_a : List[str] = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ),reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = spearmanr(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
SCREAMING_SNAKE_CASE : Optional[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE : str = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
SCREAMING_SNAKE_CASE : Any = spec.loader.load_module()
SCREAMING_SNAKE_CASE : Tuple = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
SCREAMING_SNAKE_CASE : str = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
SCREAMING_SNAKE_CASE : Dict = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints() -> None:
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
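# --- Illustrative sketch (editor's addition) ---
# What the checkpoint regex above extracts from a typical config docstring fragment:
_demo_hits = _re_checkpoint.findall(
    "See [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
)
assert _demo_hits == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]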
| 89 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
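# --- Illustrative sketch (editor's addition) ---
# The dummy-object pattern fails lazily: importing the placeholder is free, and the
# error only fires when someone actually tries to use the missing backend.
# A self-contained analogue of that behavior (toy class, no real backend check):
class _DemoDummy:
    def __init__(self, *args, **kwargs):
        raise ImportError("This class requires the `onnx` backend to be installed.")


try:
    _DemoDummy()
except ImportError:
    pass  # exactly the failure mode users see without `onnxruntime` installed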
| 689 | 0 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
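# --- Illustrative sketch (editor's addition) ---
# The HfArgumentParser pattern used above, demonstrated with a toy dataclass
# (`_DemoArgs` and its field are hypothetical, not part of the real benchmark args):
from dataclasses import dataclass as _demo_dataclass


@_demo_dataclass
class _DemoArgs:
    models: str = "bert-base-uncased"


(_demo_parsed,) = HfArgumentParser(_DemoArgs).parse_args_into_dataclasses(args=["--models", "gpt2"])
assert _demo_parsed.models == "gpt2"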
if __name__ == "__main__":
    main()
| 90 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
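# --- Illustrative sketch (editor's addition) ---
# The past_key_values shape constructed above, spelled out with toy numbers
# (values chosen for illustration, not a real GPT-J run):
_demo_batch, _demo_heads, _demo_past_len, _demo_hidden = 2, 16, 10, 4096
_demo_shape = (_demo_batch, _demo_heads, _demo_past_len, _demo_hidden // _demo_heads)
assert _demo_shape == (2, 16, 10, 256)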
| 689 | 0 |
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage:
    sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely, wrapped for `max_width` chars.
    """

    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys,
    repeat_times, output_dir, verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
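# --- Illustrative sketch (editor's addition) ---
# How the --variations dimensions expand into a cartesian product of command-line
# strings, mirroring the itertools.product logic in main() below:
import itertools as _demo_it

_demo_dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
_demo_variations = [" ".join(v).strip() for v in _demo_it.product(*_demo_dims)]
assert len(_demo_variations) == 6 and "--tf32 0" in _demo_variations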
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
    main()
| 91 |
'''simple docstring'''
def solution(limit: int = 50000000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
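# --- Illustrative check (editor's addition) ---
# Below 50 only 28 (2^2 + 2^3 + 2^4), 33, 47 and 49 are expressible as
# prime square + prime cube + prime fourth power:
assert solution(50) == 4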
if __name__ == "__main__":
print(f'{solution() = }')
| 689 | 0 |
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
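# --- Illustrative sketch (editor's addition): brute-force cross-check ---
# An articulation point is a vertex whose removal disconnects the graph; this
# naive O(V * (V + E)) check confirms the DFS result for the small sample graph.
def _brute_force_aps(graph):
    def _connected(g, skip):
        nodes = [v for v in g if v != skip]
        if not nodes:
            return True
        seen, stack = {nodes[0]}, [nodes[0]]
        while stack:
            for nxt in g[stack.pop()]:
                if nxt != skip and nxt not in seen:
                    seen.add(nxt)
                    stack.append(nxt)
        return len(seen) == len(nodes)

    return [v for v in graph if not _connected(graph, v)]


assert _brute_force_aps(data) == [2, 3, 5]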
| 92 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
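    # --- Editor's note (addition): torch.Generator is device-bound, which is why
    # get_dummy_inputs above special-cases "mps"; torch.manual_seed falls back to the
    # global CPU generator, which MPS runs accept.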
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 689 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
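    # --- Editor's note (addition): with this lazy pattern, e.g.
    # `from transformers.models.x_clip import XCLIPConfig` only triggers the real
    # import of configuration_x_clip at first attribute access, not at package import.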
| 93 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
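# --- Illustrative sketch (editor's addition) ---
# What the decorator does in practice: a wrapped method first gives an attached
# accelerate hook a chance to move the module onto its device. A toy stand-in
# (`_DemoHook` is a hypothetical stub, not a real accelerate hook):
class _DemoHook:
    def pre_forward(self, module):
        module.moved = True


class _DemoModule:
    _hf_hook = _DemoHook()
    moved = False

    @apply_forward_hook
    def encode(self, x):
        return x


assert _DemoModule().encode(1) == 1  # return value is unchanged either way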
| 689 | 0 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
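# --- Illustrative check (editor's addition) ---
# Project Euler's worked example: the prime factors of 13195 are 5, 7, 13 and 29.
assert solution(13195) == 29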
if __name__ == "__main__":
print(f"""{solution() = }""")
| 94 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
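# --- Illustrative sketch (editor's addition) ---
# The core LoRA merge above in isolation: W += alpha * (up @ down), with `up` of
# shape (out, r) and `down` of shape (r, in); toy numbers below.
_demo_W = torch.zeros(4, 4)
_demo_W += 0.75 * torch.mm(torch.ones(4, 2), torch.ones(2, 4))
assert float(_demo_W[0, 0]) == 1.5  # 0.75 * (1*1 + 1*1)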
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 689 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    labelaid = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
return train_ds, val_ds, test_ds, labelaid
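# --- Illustrative sketch (editor's addition) ---
# Minimal standalone from_generator call mirroring the dataset construction above
# (toy feature name "input_ids"; real runs use the tokenizer's own input names):
def _demo_gen():
    yield ({"input_ids": [1, 2, 3]}, 0)


_demo_ds = tf.data.Dataset.from_generator(
    _demo_gen,
    ({"input_ids": tf.int32}, tf.int64),
    ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
)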
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO ,)
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file,
        tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification", cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config, cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)
return results
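# --- Illustrative sketch (editor's addition) ---
# The accuracy computation used in compute_metrics above, on toy logits:
_demo_preds = np.argmax(np.array([[0.1, 0.9], [0.8, 0.2]]), axis=1)
assert (_demo_preds == np.array([1, 0])).mean() == 1.0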
if __name__ == "__main__":
main()
| 95 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 689 | 0 |
"""simple docstring"""
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('This should never happen')
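# --- Illustrative check (editor's addition) ---
# res() returns the common logarithm of x^y, so comparing res values compares
# the magnitudes of the powers without ever computing them:
assert abs(res(2, 10) - 10 * math.log10(2)) < 1e-12
assert res(2, 10) < res(10, 10)  # 2^10 = 1024 < 10^10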
if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = 'Enter the base and the power separated by a comma: '
    x1, y1 = map(int, input(prompt).split(','))
    x2, y2 = map(int, input(prompt).split(','))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print('Largest number is', x1, '^', y1)
    elif res2 > res1:
        print('Largest number is', x2, '^', y2)
    else:
        print('Both are equal')
| 96 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[key] = val

    return orig_state_dict
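# --- Illustrative sketch (editor's addition) ---
# How a fused qkv weight of shape (3*dim, dim) splits into query/key/value above:
_demo_dim = 4
_demo_qkv = torch.arange(3 * _demo_dim * _demo_dim, dtype=torch.float32).reshape(3 * _demo_dim, _demo_dim)
_demo_q = _demo_qkv[:_demo_dim, :]
_demo_k = _demo_qkv[_demo_dim : _demo_dim * 2, :]
_demo_v = _demo_qkv[-_demo_dim:, :]
assert _demo_q.shape == _demo_k.shape == _demo_v.shape == (_demo_dim, _demo_dim)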
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] , expected_slice , atol=1e-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss , expected_loss , atol=1e-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
        model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print("""Pushing to the hub...""" )
        model.push_to_hub(model_name , organization="nielsr" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
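# A minimal sketch of the slice-verification idiom the conversion script above
# relies on. The tensor values here are invented for illustration; only the
# shape check plus torch.allclose-with-tolerance pattern is the point.
import torch

logits = torch.tensor([0.3669, -0.0688, -0.2421])          # pretend model output slice
expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])  # hard-coded reference
assert logits.shape == expected_slice.shape
assert torch.allclose(logits, expected_slice, atol=1e-4)   # tolerate tiny numeric drift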
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/s2t-wav2vec2-large-en-de': 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """Constructs a Speech2Text2 tokenizer (BPE-based; decode-only when no merges file is given)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", pad_token="<pad>", eos_token="</s>", unk_token="<unk>", do_lower_case=False, merges_file=None, **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs)
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Any ) -> int:
lowercase_ = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
lowercase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
lowercase_ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase_ , lowercase_ = bigram
lowercase_ = []
lowercase_ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
lowercase_ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase_ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowercase_ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
lowercase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
lowercase_ = ''' '''.join(SCREAMING_SNAKE_CASE_ )
if word == "\n " + BPE_TOKEN_MERGES:
lowercase_ = '''\n''' + BPE_TOKEN_MERGES
if word.endswith(SCREAMING_SNAKE_CASE_ ):
lowercase_ = word.replace(SCREAMING_SNAKE_CASE_ , '''''' )
lowercase_ = word.replace(''' ''' , SCREAMING_SNAKE_CASE_ )
lowercase_ = word
return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide a `merges.txt` file at instantiation to enable encoding.")

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
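# A small, self-contained sketch (separate from the tokenizer above) of the two
# ideas it relies on: greedy BPE-style pair merging, and undoing the "@@ "
# continuation marker when detokenizing. All data below is invented.
merges_rank = {("h", "e"): 0, ("he", "l"): 1}  # lower rank = merge earlier

def bpe_once(word: tuple) -> tuple:
    # merge the single adjacent pair with the best (lowest) rank, once
    pairs = [(word[i], word[i + 1]) for i in range(len(word) - 1)]
    best = min(pairs, key=lambda p: merges_rank.get(p, float("inf")))
    if best not in merges_rank:
        return word
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == best:
            merged.append(word[i] + word[i + 1])
            i += 2
        else:
            merged.append(word[i])
            i += 1
    return tuple(merged)

assert bpe_once(("h", "e", "l", "p")) == ("he", "l", "p")
# "@@ " marks a non-final subword; joining on it glues the pieces back together
assert "".join("hel@@ lo wor@@ ld".split("@@ ")) == "hello world"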
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_a : Tuple = """\
"""
_a : Tuple = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_a : Optional[Any] = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ),reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""],)
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
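# A quick sketch of the reproducibility pattern the tests above rely on:
# re-seeding the torch generator before each pipeline call makes two runs
# produce identical samples, so outputs can be compared against fixed
# reference slices. Toy example with plain torch ops:
import torch

generator = torch.manual_seed(0)
a = torch.randn(3, generator=generator)
generator = torch.manual_seed(0)   # reset to the same seed
b = torch.randn(3, generator=generator)
assert torch.equal(a, b)  # identical draws from identical seeds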
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(self, images=None, input_points=None, input_labels=None, input_boxes=None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        # pop arguments that are not used in the forward pass but are used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes)

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor, original_sizes, input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, return_tensors=return_tensors)

        return encoding_image_processor
    def _normalize_and_convert(self, encoding_image_processor, original_sizes, input_points=None, input_labels=None, input_boxes=None, return_tensors="pt"):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0)
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of lists of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of lists of integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of lists of lists of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
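# A standalone sketch of the coordinate rescaling done by _normalize_coordinates
# above: a point in an original H x W image is mapped into the resized frame
# whose longest edge is `target_size` (1024 for SAM). The rounding here mirrors
# the usual int(x * scale + 0.5) resize convention; values are illustrative.
import numpy as np

def rescale_point(point, original_size, target_size=1024):
    old_h, old_w = original_size
    scale = target_size / max(old_h, old_w)  # longest edge -> target_size
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    x, y = point
    return x * (new_w / old_w), y * (new_h / old_h)

print(rescale_point((250, 100), (480, 640)))  # -> (400.0, 160.0)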
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wavlm'] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
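# A toy sketch of the lazy-module idea used above, independent of transformers'
# _LazyModule: attribute access triggers the real import, so `import package`
# stays cheap until a symbol is actually needed. The submodule mapping below is
# hypothetical and assumes a package layout like the one in this file.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, submodules):
        super().__init__(name)
        self._submodules = submodules  # e.g. {"WavLMConfig": ".configuration_wavlm"}

    def __getattr__(self, attr):
        module = importlib.import_module(self._submodules[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value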
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."})
    dataset_config_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}, )
    max_seq_length: int = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        }, )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."})
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f', distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}')
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f'load a local file for {key}: {data_files[key]}')

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__lowerCAmelCase = raw_datasets["""train"""].features["""label"""].names
__lowerCAmelCase = len(lowercase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True, )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__lowerCAmelCase = {"""Refused""": 0, """Entailed""": 1}
__lowerCAmelCase = {0: """Refused""", 1: """Entailed"""}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the '
            f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f'{index}\t{item}\n')
__lowerCAmelCase = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase )
else:
trainer.create_model_card(**lowercase )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
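# A standalone illustration of the `#`-delimited table format handled by
# _convert_table_text_to_pandas above: the first row is the header, subsequent
# rows are records. The example data is invented.
import pandas as pd

table_text = "city#population\nparis#2.1m\nmadrid#3.3m\n"
rows = [line.split("#") for line in table_text.strip("\n").split("\n")]
table = pd.DataFrame.from_records(rows[1:], columns=rows[0])
print(table)
# prints roughly:
#      city population
# 0   paris       2.1m
# 1  madrid       3.3m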
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
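# The function above checks for automorphic numbers: n is automorphic when n**2
# ends in the digits of n (e.g. 5 -> 25, 76 -> 5776). A quick demonstration:
print([n for n in range(1000) if is_automorphic_number(n)])
# [0, 1, 5, 6, 25, 76, 376, 625]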
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""",objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""",objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""",objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""",objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""",objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""",objects["""torch_and_transformers_and_onnx"""] )
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Union[str, Any] =logging.get_logger(__name__)
lowerCAmelCase__ : Tuple ={
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
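# Quick sanity check of the property above: the overall downsampling factor of
# the convolutional feature extractor is just the product of its strides.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # WavLM defaults
assert functools.reduce(operator.mul, conv_stride, 1) == 320
# i.e. one output frame per 320 input samples (~20 ms at 16 kHz)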
'''simple docstring'''
def decimal_to_fraction(decimal) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm to find the greatest common divisor
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
def _a ( self ):
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def _a ( self , _A ):
'''simple docstring'''
UpperCamelCase : List[str] = config.context_length + max(config.lags_sequence )
UpperCamelCase : Any = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
UpperCamelCase : Any = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
UpperCamelCase : Union[str, Any] = floats_tensor([self.batch_size, _past_length] )
UpperCamelCase : str = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
UpperCamelCase : int = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
UpperCamelCase : Optional[Any] = floats_tensor([self.batch_size, config.prediction_length] )
UpperCamelCase : Dict = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.get_config()
UpperCamelCase : int = self.prepare_autoformer_inputs_dict(_A )
return config, inputs_dict
def _a ( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def _a ( self , _A , _A ):
'''simple docstring'''
UpperCamelCase : str = AutoformerModel(config=_A ).to(_A ).eval()
UpperCamelCase : List[str] = model(**_A )
UpperCamelCase : Any = outputs.encoder_last_hidden_state
UpperCamelCase : List[Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase : Optional[Any] = model.get_encoder()
encoder.save_pretrained(_A )
UpperCamelCase : Union[str, Any] = AutoformerEncoder.from_pretrained(_A ).to(_A )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = model.create_network_inputs(**_A )
UpperCamelCase , UpperCamelCase : Tuple = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
UpperCamelCase : Dict = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
UpperCamelCase : str = encoder(inputs_embeds=_A )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
UpperCamelCase : Tuple = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
UpperCamelCase : Optional[Any] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
UpperCamelCase : List[Any] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
UpperCamelCase : str = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase : Any = model.get_decoder()
decoder.save_pretrained(_A )
UpperCamelCase : Optional[Any] = AutoformerDecoder.from_pretrained(_A ).to(_A )
UpperCamelCase : str = decoder(
trend=_A , inputs_embeds=_A , encoder_hidden_states=_A , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
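# A minimal sketch of the series decomposition exercised above (hypothetical
# helper, not the library's AutoformerSeriesDecompositionLayer; kernel_size
# plays the role of the config's moving_average, 25 in the tester defaults):
def _moving_average_decompose(x, kernel_size=25):
    # x: (batch, time, features); replicate the edges so the average keeps length
    pad_front = (kernel_size - 1) // 2
    pad_back = kernel_size - 1 - pad_front
    padded = torch.cat(
        [x[:, :1, :].repeat(1, pad_front, 1), x, x[:, -1:, :].repeat(1, pad_back, 1)], dim=1
    )
    trend = padded.unfold(1, kernel_size, 1).mean(dim=-1)  # (batch, time, features)
    seasonal = x - trend  # residual after removing the trend
    return seasonal, trend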
@require_torch
class lowercase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : Tuple = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__lowerCAmelCase : str = (AutoformerForPrediction,) if is_torch_available() else ()
__lowerCAmelCase : Dict = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : Any = False
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : Optional[int] = False
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = AutoformerModelTester(self )
UpperCamelCase : int = ConfigTester(self , config_class=_A , has_text_modality=_A )
def _a ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCamelCase : Optional[int] = model_class(_A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
UpperCamelCase , UpperCamelCase : Optional[Any] = model_class.from_pretrained(_A , output_loading_info=_A )
self.assertEqual(info["""missing_keys"""] , [] )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_A )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def _a ( self ):
'''simple docstring'''
pass
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Any = inspect.signature(getattr(_A , """forward""" ) )
# The main input is the name of the argument after `self`
UpperCamelCase : Optional[Any] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , _A )
def _a ( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Dict = model_class(_A )
UpperCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : str = [*signature.parameters.keys()]
UpperCamelCase : Optional[Any] = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(_A )] , _A )
def _a ( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Dict = True
UpperCamelCase : Tuple = getattr(self.model_tester , """seq_length""" , _A )
UpperCamelCase : Dict = getattr(self.model_tester , """decoder_seq_length""" , _A )
UpperCamelCase : Optional[Any] = getattr(self.model_tester , """encoder_seq_length""" , _A )
UpperCamelCase : Optional[int] = getattr(self.model_tester , """d_model""" , _A )
UpperCamelCase : Optional[Any] = getattr(self.model_tester , """num_attention_heads""" , _A )
UpperCamelCase : Tuple = d_model // num_attention_heads
for model_class in self.all_model_classes:
UpperCamelCase : List[Any] = True
UpperCamelCase : Any = False
UpperCamelCase : List[str] = True
UpperCamelCase : List[str] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCamelCase : Tuple = model(**self._prepare_for_class(_A , _A ) )
UpperCamelCase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase : Tuple = True
UpperCamelCase : Optional[int] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCamelCase : List[Any] = model(**self._prepare_for_class(_A , _A ) )
UpperCamelCase : List[str] = outputs.encoder_attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
UpperCamelCase : List[str] = len(_A )
UpperCamelCase : List[str] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(_A , _A )
# decoder attentions
UpperCamelCase : Optional[int] = outputs.decoder_attentions
self.assertIsInstance(_A , (list, tuple) )
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
UpperCamelCase : int = outputs.cross_attentions
self.assertIsInstance(_A , (list, tuple) )
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
UpperCamelCase : Any = True
UpperCamelCase : Dict = True
UpperCamelCase : int = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCamelCase : int = model(**self._prepare_for_class(_A , _A ) )
self.assertEqual(out_len + 2 , len(_A ) )
UpperCamelCase : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def _a ( self ):
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def UpperCamelCase (SCREAMING_SNAKE_CASE="train-batch.pt" ):
UpperCamelCase : List[str] = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=SCREAMING_SNAKE_CASE , repo_type="""dataset""" )
UpperCamelCase : Optional[int] = torch.load(SCREAMING_SNAKE_CASE , map_location=SCREAMING_SNAKE_CASE )
return batch
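# Hedged note: the fixture files above are small validation batches of 64
# monthly-tourism series (the batch size matches the shape assertions in the
# tests below), e.g. batch["past_values"] is (64, past_length) and the train
# batch additionally carries "future_values" for teacher forcing.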
@require_torch
@slow
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_A )
UpperCamelCase : List[str] = prepare_batch()
with torch.no_grad():
UpperCamelCase : Dict = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
UpperCamelCase : Optional[int] = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , _A )
UpperCamelCase : Optional[int] = torch.tensor(
[[0.35_93, -1.33_98, 0.63_30], [0.22_79, 1.53_96, -0.17_92], [0.04_50, 1.32_25, -0.23_35]] , device=_A )
self.assertTrue(torch.allclose(output[0, :3, :3] , _A , atol=_A ) )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_A )
UpperCamelCase : Dict = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
UpperCamelCase : Optional[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
UpperCamelCase : Dict = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , _A )
UpperCamelCase : Any = torch.tensor(
[[-0.07_34, -0.90_36, 0.83_58], [4.71_86, 2.41_13, 1.95_81], [1.79_53, 2.35_58, 1.29_70]] , device=_A )
self.assertTrue(torch.allclose(output[0, :3, :3] , _A , atol=_A ) )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_A )
UpperCamelCase : List[Any] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
UpperCamelCase : Optional[Any] = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
UpperCamelCase : Tuple = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , _A )
UpperCamelCase : Dict = torch.tensor([31_30.67_63, 40_56.52_93, 70_53.07_86] , device=_A )
UpperCamelCase : Optional[Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _A , rtol=1e-1 ) )
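# Downstream, the sampled trajectories are typically reduced to point and
# interval forecasts, e.g. (a comment-only sketch using the names above):
#   point_forecast = outputs.sequences.mean(dim=1)    # (64, prediction_length)
#   lo, hi = outputs.sequences.quantile(torch.tensor([0.1, 0.9]), dim=1)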
| 102 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
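# Minimal usage sketch (hypothetical path; `ModelProto` is injected into this
# module's globals by the builder calls above):
#
#   m = ModelProto()
#   with open("spiece.model", "rb") as f:
#       m.ParseFromString(f.read())
#   print(m.trainer_spec.model_prefix, len(m.pieces))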
| 689 | 0 |
"""simple docstring"""
__all__ = [
'''DownloadConfig''',
'''DownloadManager''',
'''DownloadMode''',
'''StreamingDownloadManager''',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 103 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : torch.FloatTensor
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE=True,):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = torch.nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[0],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
# down
__lowerCAmelCase = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_down_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,add_downsample=not is_final_block,resnet_eps=1e-6,downsample_padding=0,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""",attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# out
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = 2 * out_channels if double_z else out_channels
__lowerCAmelCase = nn.Convad(block_out_channels[-1],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = x
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""","""1.11.0""" ):
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
__lowerCAmelCase = down_block(__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE="group",):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[-1],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
__lowerCAmelCase = in_channels if norm_type == """spatial""" else None
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type,attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# up
__lowerCAmelCase = list(reversed(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = reversed_block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_up_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block + 1,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,prev_output_channel=__SCREAMING_SNAKE_CASE,add_upsample=not is_final_block,resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,resnet_time_scale_shift=__SCREAMING_SNAKE_CASE,)
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = output_channel
# out
if norm_type == "spatial":
__lowerCAmelCase = SpatialNorm(block_out_channels[0],__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = nn.Convad(block_out_channels[0],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
__lowerCAmelCase = z
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""","""1.11.0""" ):
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = up_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="random",__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = n_e
__lowerCAmelCase = vq_embed_dim
__lowerCAmelCase = beta
__lowerCAmelCase = legacy
__lowerCAmelCase = nn.Embedding(self.n_e,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e,1.0 / self.n_e )
__lowerCAmelCase = remap
if self.remap is not None:
self.register_buffer("""used""",torch.tensor(np.load(self.remap ) ) )
__lowerCAmelCase = self.used.shape[0]
__lowerCAmelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowerCAmelCase = self.re_embed
__lowerCAmelCase = self.re_embed + 1
print(
f'Remapping {self.n_e} indices to {self.re_embed} indices. '
f'Using {self.unknown_index} for unknown indices.' )
else:
__lowerCAmelCase = n_e
__lowerCAmelCase = sane_index_shape
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = (inds[:, :, None] == used[None, None, ...]).long()
__lowerCAmelCase = match.argmax(-1 )
__lowerCAmelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
__lowerCAmelCase = torch.randint(0,self.re_embed,size=new[unknown].shape ).to(device=new.device )
else:
__lowerCAmelCase = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
__lowerCAmelCase = 0 # simply set to zero
__lowerCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :],1,__SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = z.permute(0,2,3,1 ).contiguous()
__lowerCAmelCase = z.view(-1,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__lowerCAmelCase = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE,self.embedding.weight ),dim=1 )
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
__lowerCAmelCase = None
__lowerCAmelCase = None
# compute loss for embedding
if not self.legacy:
__lowerCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__lowerCAmelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__lowerCAmelCase = z + (z_q - z).detach()
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
if self.remap is not None:
__lowerCAmelCase = min_encoding_indices.reshape(z.shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.remap_to_used(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = min_encoding_indices.reshape(-1,1 ) # flatten
if self.sane_index_shape:
__lowerCAmelCase = min_encoding_indices.reshape(z_q.shape[0],z_q.shape[2],z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if self.remap is not None:
__lowerCAmelCase = indices.reshape(shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
__lowerCAmelCase = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
return z_q
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = parameters
__lowerCAmelCase , __lowerCAmelCase = torch.chunk(__SCREAMING_SNAKE_CASE,2,dim=1 )
__lowerCAmelCase = torch.clamp(self.logvar,-30.0,20.0 )
__lowerCAmelCase = deterministic
__lowerCAmelCase = torch.exp(0.5 * self.logvar )
__lowerCAmelCase = torch.exp(self.logvar )
if self.deterministic:
__lowerCAmelCase = __lowerCAmelCase = torch.zeros_like(
self.mean,device=self.parameters.device,dtype=self.parameters.dtype )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
__lowerCAmelCase = randn_tensor(
self.mean.shape,generator=__SCREAMING_SNAKE_CASE,device=self.parameters.device,dtype=self.parameters.dtype )
__lowerCAmelCase = self.mean + self.std * sample
return x
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean,2 ) + self.var - 1.0 - self.logvar,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar,dim=[1, 2, 3],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
__lowerCAmelCase = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean,2 ) / self.var,dim=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.mean
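# Sanity check of the KL term above (a comment-only sketch; in the original
# diffusers source this class is DiagonalGaussianDistribution and the method
# is kl(); `parameters` packs mean and logvar along dim=1, so an all-zeros
# tensor encodes a standard normal whose KL divergence to N(0, I) is zero):
#
#   params = torch.zeros(1, 8, 4, 4)   # mean = 0, logvar = 0
#   dist = DiagonalGaussianDistribution(params)
#   assert torch.allclose(dist.kl(), torch.zeros(1))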
| 689 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = " Hello world! cécé herlolip"
mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries that have no transformers counterpart."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old_key, new_key):
    """Move a state-dict entry from `old_key` to `new_key` in place."""
    val = dct.pop(old_key)
    dct[new_key] = val
def load_xsum_checkpoint(checkpoint_path):
    """Load a local bart_xsum/model.pt checkpoint into a fairseq hub interface."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    """Tie a bias-free Linear layer to an embedding matrix (weights shared)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
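# Note: nn.Linear(vocab_size, emb_size) allocates an (emb_size, vocab_size)
# weight, but overwriting .data with the (vocab_size, emb_size) embedding
# matrix rebinds it in place, so the layer computes hidden @ emb.weight.T
# and yields vocab-size logits while sharing storage with the embedding.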
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak a fairseq BART checkpoint into the transformers structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
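# Example invocation (hypothetical paths; assumes this script is saved as
# convert_bart_checkpoint.py):
#
#   python convert_bart_checkpoint.py bart.large ./bart-large-converted
#   python convert_bart_checkpoint.py bart_xsum/model.pt ./bart-xsum --hf_config facebook/bart-large-xsum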
| 104 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
def __len__( self ):
'''simple docstring'''
return len(self.features )
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
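# Minimal usage sketch (hypothetical paths; `tokenizer` is any
# PreTrainedTokenizer compatible with the chosen model_type):
#
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   dataset = SquadDataset(args, tokenizer, mode=Split.dev)
#   batch = dataset[0]   # dict of tensors ready for a QA model forward pass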
| 689 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=7 ,snake_case__=3 ,snake_case__=30 ,snake_case__=400 ,snake_case__=True ,snake_case__=None ,snake_case__=True ,snake_case__=[0.5, 0.5, 0.5] ,snake_case__=[0.5, 0.5, 0.5] ,snake_case__=True ,snake_case__=1 / 255 ,snake_case__=True ,):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE_ : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : str = batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE_ : Tuple = min_resolution
SCREAMING_SNAKE_CASE_ : Any = max_resolution
SCREAMING_SNAKE_CASE_ : str = do_resize
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size
SCREAMING_SNAKE_CASE_ : Tuple = do_normalize
SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_std
SCREAMING_SNAKE_CASE_ : Tuple = do_rescale
SCREAMING_SNAKE_CASE_ : Any = rescale_factor
SCREAMING_SNAKE_CASE_ : Any = do_pad
def snake_case ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def snake_case ( self ,snake_case__ ,snake_case__=False ):
if not batched:
SCREAMING_SNAKE_CASE_ : List[str] = image_inputs[0]
if isinstance(snake_case__ ,Image.Image ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = image.size
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE_ : Dict = int(self.size['shortest_edge'] * h / w )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.size['shortest_edge']
elif w > h:
SCREAMING_SNAKE_CASE_ : str = self.size['shortest_edge']
SCREAMING_SNAKE_CASE_ : Optional[int] = int(self.size['shortest_edge'] * w / h )
else:
SCREAMING_SNAKE_CASE_ : int = self.size['shortest_edge']
SCREAMING_SNAKE_CASE_ : List[Any] = self.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = max(snake_case__ ,key=lambda snake_case__ : item[0] )[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = max(snake_case__ ,key=lambda snake_case__ : item[1] )[1]
return expected_height, expected_width
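    # Worked example of the resize rule above with the default
    # size={"shortest_edge": 18}: a PIL input of width 20 and height 30 takes
    # the w < h branch, so width becomes 18 and height becomes
    # int(18 * 30 / 20) = 27.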
@require_torch
@require_vision
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Dict = DetaImageProcessor if is_vision_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DetaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'image_mean' ) )
self.assertTrue(hasattr(snake_case__ ,'image_std' ) )
self.assertTrue(hasattr(snake_case__ ,'do_normalize' ) )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'do_rescale' ) )
self.assertTrue(hasattr(snake_case__ ,'do_pad' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad ,snake_case__ )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.image_processor_tester.get_expected_values(snake_case__ ,batched=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor_tester.get_expected_values(snake_case__ ,batched=snake_case__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.image_processor_tester.get_expected_values(snake_case__ ,batched=snake_case__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def snake_case ( self ):
# prepare image and target
SCREAMING_SNAKE_CASE_ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Any = {'image_id': 39769, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE_ : int = DetaImageProcessor()
SCREAMING_SNAKE_CASE_ : int = image_processing(images=snake_case__ ,annotations=snake_case__ ,return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,snake_case__ ,atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,snake_case__ ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : int = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,snake_case__ ,atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : int = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,snake_case__ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,snake_case__ ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,snake_case__ ) )
# verify orig_size
SCREAMING_SNAKE_CASE_ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,snake_case__ ) )
# verify size
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,snake_case__ ) )
@slow
def snake_case ( self ):
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
SCREAMING_SNAKE_CASE_ : List[str] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE_ : Any = DetaImageProcessor(format='coco_panoptic' )
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(images=snake_case__ ,annotations=snake_case__ ,masks_path=snake_case__ ,return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,snake_case__ ,atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,snake_case__ ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,snake_case__ ,atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,snake_case__ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,snake_case__ ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,snake_case__ ) )
# verify masks
SCREAMING_SNAKE_CASE_ : Any = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,snake_case__ )
# verify orig_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,snake_case__ ) )
# verify size
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,snake_case__ ) )
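
# Hedged, self-contained sketch (illustrative only, not part of the original tests):
# DETR-family processors store target boxes as (center_x, center_y, width, height),
# normalised by the image size — which is why every expected box value above lies
# in [0, 1]. The helper name below is hypothetical.
def _coco_to_normalized_cxcywh(box, img_w, img_h):
    x, y, w, h = box  # COCO format: top-left corner plus width/height, in pixels
    return [(x + w / 2) / img_w, (y + h / 2) / img_h, w / img_w, h / img_h]


assert _coco_to_normalized_cxcywh([320, 60, 160, 120], 640, 480) == [0.625, 0.25, 0.25, 0.25]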
| 105 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
# vision encoder
if "img_encoder.pos_embed" in name:
__lowerCAmelCase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
__lowerCAmelCase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
__lowerCAmelCase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
__lowerCAmelCase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
__lowerCAmelCase = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
__lowerCAmelCase = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
__lowerCAmelCase = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
__lowerCAmelCase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
__lowerCAmelCase = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
__lowerCAmelCase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
__lowerCAmelCase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
__lowerCAmelCase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
__lowerCAmelCase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
__lowerCAmelCase = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
__lowerCAmelCase = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
__lowerCAmelCase = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
__lowerCAmelCase = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
__lowerCAmelCase = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
__lowerCAmelCase = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
__lowerCAmelCase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
__lowerCAmelCase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
__lowerCAmelCase = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
__lowerCAmelCase = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> Dict:
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(lowercase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase , __lowerCAmelCase = int(key_split[2] ), int(key_split[4] )
__lowerCAmelCase = config.vision_config.hidden_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[dim : dim * 2]
__lowerCAmelCase = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase = int(key_split[3] )
__lowerCAmelCase = config.text_config.hidden_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[
dim : dim * 2, :
]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[dim : dim * 2]
__lowerCAmelCase = val[-dim:]
else:
__lowerCAmelCase = rename_key(lowercase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
__lowerCAmelCase = val.squeeze_()
else:
__lowerCAmelCase = val
return orig_state_dict
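
# Hedged, self-contained sketch (illustrative): the fused-qkv split performed for
# "qkv" keys above. A fused projection of shape (3 * dim, dim) is cut into equal
# q/k/v blocks; concatenating them back recovers the original tensor.
import torch as _torch

_dim = 4
_fused = _torch.arange(3 * _dim * _dim, dtype=_torch.float32).reshape(3 * _dim, _dim)
_q, _k, _v = _fused[:_dim, :], _fused[_dim : 2 * _dim, :], _fused[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)
assert _torch.equal(_torch.cat([_q, _k, _v], dim=0), _fused)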
def _lowerCAmelCase ( ) -> str:
__lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCAmelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase , lowercase="groupvit-gcc-yfcc" , lowercase=False ) -> List[Any]:
__lowerCAmelCase = GroupViTConfig()
__lowerCAmelCase = GroupViTModel(lowercase ).eval()
__lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )["""model"""]
__lowerCAmelCase = convert_state_dict(lowercase , lowercase )
__lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowercase , strict=lowercase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase ) == 0)
# verify result
__lowerCAmelCase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowercase , padding=lowercase , return_tensors="""pt""" )
with torch.no_grad():
__lowerCAmelCase = model(**lowercase )
if model_name == "groupvit-gcc-yfcc":
__lowerCAmelCase = torch.tensor([[13.35_23, 6.36_29]] )
elif model_name == "groupvit-gcc-redcaps":
__lowerCAmelCase = torch.tensor([[16.18_73, 8.62_30]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , lowercase , atol=1e-3 )
processor.save_pretrained(lowercase )
model.save_pretrained(lowercase )
print("""Successfully saved processor and model to""" , lowercase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowercase , organization="""nielsr""" )
model.push_to_hub(lowercase , organization="""nielsr""" )
if __name__ == "__main__":
_a : int = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_a : List[str] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 689 | 0 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : str=13 , __UpperCamelCase : Optional[int]=7 , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Any=True , __UpperCamelCase : List[Any]=True , __UpperCamelCase : List[str]=99 , __UpperCamelCase : Optional[int]=32 , __UpperCamelCase : List[Any]=5 , __UpperCamelCase : int=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : Union[str, Any]=0.1 , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : int=512 , __UpperCamelCase : Union[str, Any]=16 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : List[Any]=0.0_2 , __UpperCamelCase : int=4 , ) -> Union[str, Any]:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_attention_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = num_choices
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = None
if self.use_attention_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
A = self.prepare_config_and_inputs()
A , A , A , A = config_and_inputs
A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __UpperCamelCase ( self : Any ) -> Tuple:
A = self.prepare_config_and_inputs()
A , A , A , A = config_and_inputs
A = True
A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowerCAmelCase__ ( _lowerCamelCase , unittest.TestCase ):
A_ : Tuple = True
A_ : Union[str, Any] = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCamelCase ( self : List[str] ) -> List[str]:
A = FlaxRobertaPreLayerNormModelTester(self )
@slow
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
A = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=__UpperCamelCase )
A = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCamelCase )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
A = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=__UpperCamelCase )
A = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
A = model(__UpperCamelCase )[0]
A = [1, 11, 50_265]
self.assertEqual(list(output.shape ) , __UpperCamelCase )
# compare the actual values for a slice.
A = np.array(
[[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 ) )
@slow
def __UpperCamelCase ( self : Dict ) -> List[str]:
A = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=__UpperCamelCase )
A = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
A = model(__UpperCamelCase )[0]
# compare the actual values for a slice.
A = np.array(
[[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 ) )
| 106 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_a : Tuple = logging.get_logger(__name__)
_a : Optional[int] = ["""model.decoder.embed_positions.weights"""]
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
if "emb" in name:
__lowerCAmelCase = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
__lowerCAmelCase = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
__lowerCAmelCase = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
__lowerCAmelCase = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
__lowerCAmelCase = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
__lowerCAmelCase = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
__lowerCAmelCase = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
__lowerCAmelCase = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
__lowerCAmelCase = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
__lowerCAmelCase = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple[Dict, Dict]:
__lowerCAmelCase = list(state_dict.keys() )
__lowerCAmelCase = {}
for key in keys:
__lowerCAmelCase = state_dict.pop(lowercase )
__lowerCAmelCase = rename_keys(lowercase )
if "in_proj_weight" in key:
# split fused qkv proj
__lowerCAmelCase = val[:hidden_size, :]
__lowerCAmelCase = val[hidden_size : 2 * hidden_size, :]
__lowerCAmelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__lowerCAmelCase = val
else:
__lowerCAmelCase = val
return state_dict, enc_dec_proj_state_dict
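
# Hedged, self-contained sketch (illustrative): the pop / rename / reassign pattern
# used above, shown on a plain dict. Every key is rewritten through a list of
# (old, new) substring rules; values pass through untouched.
def _rename_keys_sketch(state_dict, rules):
    renamed = {}
    for key in list(state_dict):
        val = state_dict.pop(key)
        for old, new in rules:
            key = key.replace(old, new)
        renamed[key] = val
    return renamed


assert _rename_keys_sketch(
    {"transformer.linear1.weight": 1}, [("transformer", "model.decoder"), ("linear1", "fc1")]
) == {"model.decoder.fc1.weight": 1}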
def _lowerCAmelCase ( lowercase ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
__lowerCAmelCase = 1024
__lowerCAmelCase = 24
__lowerCAmelCase = 16
elif checkpoint == "medium":
__lowerCAmelCase = 1536
__lowerCAmelCase = 48
__lowerCAmelCase = 24
elif checkpoint == "large":
__lowerCAmelCase = 2048
__lowerCAmelCase = 48
__lowerCAmelCase = 32
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
__lowerCAmelCase = MusicgenDecoderConfig(
hidden_size=lowercase , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase , num_attention_heads=lowercase , )
return config
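
# Hedged sketch (illustrative): the same checkpoint-size lookup written as a table
# instead of an if/elif chain; tuples are (hidden_size, num_hidden_layers, num_attention_heads).
_MUSICGEN_SIZES = {"small": (1024, 24, 16), "medium": (1536, 48, 24), "large": (2048, 48, 32)}
assert _MUSICGEN_SIZES["medium"] == (1536, 48, 24)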
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase=None , lowercase=None , lowercase="cpu" ) -> Optional[Any]:
__lowerCAmelCase = MusicGen.get_pretrained(lowercase , device=lowercase )
__lowerCAmelCase = decoder_config_from_checkpoint(lowercase )
__lowerCAmelCase = fairseq_model.lm.state_dict()
__lowerCAmelCase , __lowerCAmelCase = rename_state_dict(
lowercase , hidden_size=decoder_config.hidden_size )
__lowerCAmelCase = TaEncoderModel.from_pretrained("""t5-base""" )
__lowerCAmelCase = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
__lowerCAmelCase = MusicgenForCausalLM(lowercase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__lowerCAmelCase , __lowerCAmelCase = decoder.load_state_dict(lowercase , strict=lowercase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase )
if len(lowercase ) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
if len(lowercase ) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
__lowerCAmelCase = MusicgenForConditionalGeneration(text_encoder=lowercase , audio_encoder=lowercase , decoder=lowercase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase )
# check we can do a forward pass
__lowerCAmelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__lowerCAmelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__lowerCAmelCase = model(input_ids=lowercase , decoder_input_ids=lowercase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
__lowerCAmelCase = AutoTokenizer.from_pretrained("""t5-base""" )
__lowerCAmelCase = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
__lowerCAmelCase = MusicgenProcessor(feature_extractor=lowercase , tokenizer=lowercase )
# set the appropriate bos/pad token ids
__lowerCAmelCase = 2048
__lowerCAmelCase = 2048
# set other default generation config params
__lowerCAmelCase = int(30 * audio_encoder.config.frame_rate )
__lowerCAmelCase = True
__lowerCAmelCase = 3.0
if pytorch_dump_folder is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(lowercase )
processor.save_pretrained(lowercase )
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(lowercase )
processor.push_to_hub(lowercase )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
_a : List[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 689 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any], UpperCamelCase__ : Tuple, UpperCamelCase__ : int=7, UpperCamelCase__ : Optional[Any]=3, UpperCamelCase__ : Optional[int]=18, UpperCamelCase__ : List[str]=30, UpperCamelCase__ : List[str]=4_00, UpperCamelCase__ : Optional[Any]=True, UpperCamelCase__ : int=None, UpperCamelCase__ : List[Any]=True, ) -> Optional[int]:
_A = size if size is not None else {'height': 18, 'width': 18}
_A = parent
_A = batch_size
_A = num_channels
_A = image_size
_A = min_resolution
_A = max_resolution
_A = do_resize
_A = size
_A = apply_ocr
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowercase_ ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __UpperCAmelCase ( self : List[Any] ) -> int:
_A = LayoutLMvaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : str ) -> List[str]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__, 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'size' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'apply_ocr' ) )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'height': 18, 'width': 18} )
_A = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
self.assertEqual(image_processor.size, {'height': 42, 'width': 42} )
def __UpperCAmelCase ( self : Any ) -> Tuple:
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0], return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
self.assertIsInstance(encoding.words, UpperCamelCase__ )
self.assertIsInstance(encoding.boxes, UpperCamelCase__ )
# Test batched
_A = image_processing(UpperCamelCase__, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
# Test batched
_A = image_processing(UpperCamelCase__, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
# Test batched
_A = image_processing(UpperCamelCase__, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
# with apply_OCR = True
_A = LayoutLMvaImageProcessor()
from datasets import load_dataset
_A = load_dataset('hf-internal-testing/fixtures_docvqa', split='test' )
_A = Image.open(ds[0]['file'] ).convert('RGB' )
_A = image_processing(UpperCamelCase__, return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ), len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_A = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
_A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 
6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words, UpperCamelCase__ )
self.assertListEqual(encoding.boxes, UpperCamelCase__ )
# with apply_OCR = False
_A = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__ )
_A = image_processing(UpperCamelCase__, return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
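
# Hedged, self-contained sketch (illustrative): LayoutLM-style box normalisation.
# With apply_ocr=True the processor returns word boxes in a 0-1000 coordinate
# space, scaled by the page width/height. The helper name is hypothetical.
def _normalize_box(box, width, height):
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]


assert _normalize_box([100, 50, 200, 150], 2000, 1000) == [50, 50, 100, 150]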
| 107 |
'''simple docstring'''
from collections import deque
def _lowerCAmelCase ( lowercase ) -> Dict:
__lowerCAmelCase = len(lowercase )
__lowerCAmelCase = deque()
__lowerCAmelCase = [False for _ in range(lowercase )]
__lowerCAmelCase = [-1 for _ in range(lowercase )]
__lowerCAmelCase = index_of[:]
def strong_connect(lowercase , lowercase , lowercase ):
__lowerCAmelCase = index # the number when this node is seen
__lowerCAmelCase = index # lowest rank node reachable from here
index += 1
stack.append(lowercase )
__lowerCAmelCase = True
for w in g[v]:
if index_of[w] == -1:
__lowerCAmelCase = strong_connect(lowercase , lowercase , lowercase )
__lowerCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__lowerCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
__lowerCAmelCase = []
__lowerCAmelCase = stack.pop()
__lowerCAmelCase = False
component.append(lowercase )
while w != v:
__lowerCAmelCase = stack.pop()
__lowerCAmelCase = False
component.append(lowercase )
components.append(lowercase )
return index
__lowerCAmelCase = []
for v in range(lowercase ):
if index_of[v] == -1:
strong_connect(lowercase , 0 , lowercase )
return components
def _lowerCAmelCase ( lowercase , lowercase ) -> str:
__lowerCAmelCase = [[] for _ in range(lowercase )]
for u, v in edges:
g[u].append(lowercase )
return g
if __name__ == "__main__":
# Test
_a : Any = 7
_a : Tuple = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_a : Optional[int] = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_a : Optional[Any] = [(u, v) for u, v in zip(source, target)]
_a : Optional[int] = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
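
# Hedged, self-contained reference sketch (illustrative): a compact recursive
# Tarjan. Components come out in reverse topological order of the condensation,
# so sink SCCs are emitted first.
def _tarjan_sketch(graph):
    n = len(graph)
    index_of, lowlink = [-1] * n, [0] * n
    on_stack = [False] * n
    stack, components, counter = [], [], [0]

    def dfs(v):
        index_of[v] = lowlink[v] = counter[0]
        counter[0] += 1
        stack.append(v)
        on_stack[v] = True
        for w in graph[v]:
            if index_of[w] == -1:
                dfs(w)
                lowlink[v] = min(lowlink[v], lowlink[w])
            elif on_stack[w]:
                lowlink[v] = min(lowlink[v], index_of[w])
        if lowlink[v] == index_of[v]:
            component = []
            while True:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
                if w == v:
                    break
            components.append(component)

    for v in range(n):
        if index_of[v] == -1:
            dfs(v)
    return components


# Two SCCs, {0, 1, 2} and {3, 4}; the sink SCC {3, 4} is reported first.
assert _tarjan_sketch([[1], [2], [0, 3], [4], [3]]) == [[4, 3], [2, 1, 0]]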
| 689 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = (DPMSolverSinglestepScheduler,)
_lowerCamelCase = (('''num_inference_steps''', 25),)
def lowerCamelCase ( self : Optional[int] , **lowerCamelCase : int ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
"""sample_max_value""": 1.0,
"""algorithm_type""": """dpmsolver++""",
"""solver_type""": """midpoint""",
"""lambda_min_clipped""": -float("""inf""" ),
"""variance_type""": None,
}
config.update(**lowerCamelCase )
return config
def lowerCamelCase ( self : Optional[int] , lowerCamelCase : List[str]=0 , **lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("""num_inference_steps""" , lowerCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**lowerCamelCase )
_UpperCAmelCase = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(lowerCamelCase )
new_scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase , _UpperCAmelCase = sample, sample
for t in range(lowerCamelCase , time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCamelCase ( self : List[Any] , lowerCamelCase : Optional[int]=0 , **lowerCamelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("""num_inference_steps""" , lowerCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self : Any , lowerCamelCase : Dict=None , **lowerCamelCase : Optional[Any] ) -> str:
"""simple docstring"""
if scheduler is None:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**lowerCamelCase )
_UpperCAmelCase = scheduler_class(**lowerCamelCase )
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**lowerCamelCase )
_UpperCAmelCase = scheduler_class(**lowerCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
return sample
def lowerCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = 50
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase )
        # make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:] ):
_UpperCAmelCase = model(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
_UpperCAmelCase = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def lowerCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def lowerCamelCase ( self : Any ) -> str:
"""simple docstring"""
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = self.full_loop(scheduler=lowerCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
_UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase = self.full_loop(scheduler=lowerCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def lowerCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCamelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase , prediction_type=lowerCamelCase , sample_max_value=lowerCamelCase , algorithm_type="""dpmsolver++""" , solver_order=lowerCamelCase , solver_type=lowerCamelCase , )
def lowerCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def lowerCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase , solver_type=lowerCamelCase , prediction_type=lowerCamelCase , algorithm_type=lowerCamelCase , )
_UpperCAmelCase = self.full_loop(
solver_order=lowerCamelCase , solver_type=lowerCamelCase , prediction_type=lowerCamelCase , algorithm_type=lowerCamelCase , )
assert not torch.isnan(lowerCamelCase ).any(), "Samples have nan numbers"
def lowerCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
self.check_over_configs(lower_order_final=lowerCamelCase )
self.check_over_configs(lower_order_final=lowerCamelCase )
def lowerCamelCase ( self : int ) -> int:
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def lowerCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
self.check_over_configs(variance_type=lowerCamelCase )
self.check_over_configs(variance_type="""learned_range""" )
def lowerCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=lowerCamelCase , time_step=0 )
def lowerCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.full_loop()
_UpperCAmelCase = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def lowerCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.full_loop(use_karras_sigmas=lowerCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def lowerCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.full_loop(prediction_type="""v_prediction""" )
_UpperCAmelCase = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def lowerCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=lowerCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def lowerCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(thresholding=lowerCamelCase , dynamic_thresholding_ratio=0 )
_UpperCAmelCase = scheduler_class(**lowerCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
        assert sample.dtype == torch.floataa
| 108 |
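
# Hedged usage sketch (illustrative, not part of the original tests above): a bare
# denoising loop with DPMSolverSinglestepScheduler; torch.zeros_like stands in for
# a real UNet's noise prediction.
import torch
from diffusers import DPMSolverSinglestepScheduler


def _dpm_single_loop_sketch(num_inference_steps=10):
    scheduler = DPMSolverSinglestepScheduler()
    scheduler.set_timesteps(num_inference_steps)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample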
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ) -> Union[str, Any]:
__lowerCAmelCase = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
__lowerCAmelCase = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(lowercase )
# Let's go
__lowerCAmelCase = parser.parse_args()
if not hasattr(lowercase , """func""" ):
parser.print_help()
exit(1 )
# Run
__lowerCAmelCase = args.func(lowercase )
service.run()
if __name__ == "__main__":
main()
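    # Hedged usage note (illustrative): installed as a console script, this module
    # is typically invoked as
    #   diffusers-cli env
    # which dispatches to EnvironmentCommand and prints environment information.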
| 689 | 0 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
a = "naver-clova-ix/donut-base"
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = DonutProcessor.from_pretrained(lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {
"""name""": """John Doe""",
"""age""": """99""",
"""city""": """Atlanta""",
"""state""": """GA""",
"""zip""": """30301""",
"""phone""": """123-4567""",
"""nicknames""": [{"""nickname""": """Johnny"""}, {"""nickname""": """JD"""}],
}
__SCREAMING_SNAKE_CASE = (
"""<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"""
"""<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"""
"""<s_nicknames><s_nickname>Johnny</s_nickname>"""
"""<sep/><s_nickname>JD</s_nickname></s_nicknames>"""
)
__SCREAMING_SNAKE_CASE = self.processor.tokenajson(lowerCamelCase )
self.assertDictEqual(lowerCamelCase ,lowerCamelCase )
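
# Hedged, self-contained sketch (illustrative; the helper name is hypothetical):
# the inverse of the parsing tested above — serialising a dict into Donut-style
# XML-like tags, with list items joined by <sep/>.
def _json_to_token_sketch(obj):
    if isinstance(obj, dict):
        return "".join(f"<s_{k}>{_json_to_token_sketch(v)}</s_{k}>" for k, v in obj.items())
    if isinstance(obj, list):
        return "<sep/>".join(_json_to_token_sketch(v) for v in obj)
    return str(obj)


assert _json_to_token_sketch({"name": "John Doe", "nicknames": [{"nickname": "JD"}]}) == (
    "<s_name>John Doe</s_name><s_nicknames><s_nickname>JD</s_nickname></s_nicknames>"
)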
| 109 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)
_a : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
_a : Any = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
for attribute in key.split(""".""" ):
__lowerCAmelCase = getattr(lowercase , lowercase )
if weight_type is not None:
__lowerCAmelCase = getattr(lowercase , lowercase ).shape
else:
__lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
__lowerCAmelCase = value
elif weight_type == "weight_g":
__lowerCAmelCase = value
elif weight_type == "weight_v":
__lowerCAmelCase = value
elif weight_type == "bias":
__lowerCAmelCase = value
else:
__lowerCAmelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
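
# Hedged, self-contained sketch (illustrative): the dotted-path traversal used
# above, on a plain object tree — "a.b.c" is walked one getattr at a time.
import functools as _functools
import types as _types


def _get_by_path(root, dotted):
    return _functools.reduce(getattr, dotted.split("."), root)


_demo_root = _types.SimpleNamespace(encoder=_types.SimpleNamespace(layer=_types.SimpleNamespace(weight="w")))
assert _get_by_path(_demo_root, "encoder.layer.weight") == "w"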
def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]:
__lowerCAmelCase = []
__lowerCAmelCase = fairseq_model.state_dict()
__lowerCAmelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCAmelCase = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key):
# special case since naming is very similar
continue
__lowerCAmelCase = True
if "*" in mapped_key:
__lowerCAmelCase = name.split(lowercase )[0].split(""".""" )[-2]
__lowerCAmelCase = mapped_key.replace("""*""" , lowercase )
if "weight_g" in name:
__lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
__lowerCAmelCase = """weight_v"""
elif "bias" in name:
__lowerCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase = """weight"""
else:
__lowerCAmelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'Unused weights: {unused_weights}' )
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
__lowerCAmelCase = name.split(""".""" )
__lowerCAmelCase = int(items[0] )
__lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Dict:
if config_path is not None:
__lowerCAmelCase = UniSpeechSatConfig.from_pretrained(lowercase )
else:
__lowerCAmelCase = UniSpeechSatConfig()
__lowerCAmelCase = """"""
if is_finetuned:
__lowerCAmelCase = UniSpeechSatForCTC(lowercase )
else:
__lowerCAmelCase = UniSpeechSatForPreTraining(lowercase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
__lowerCAmelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_a : List[str] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_a : Union[str, Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 689 | 0 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
lowerCAmelCase_ : int = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
a_ : int = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = {"Refused": 0, "Entailed": 1}
    id2label = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
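    # Added illustration: a raw `table_text` value such as "col1#col2\n1#a\n2#b"
    # is parsed by the helper above into a pandas DataFrame with header row
    # ["col1", "col2"] and records [["1", "a"], ["2", "b"]].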
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 442 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 689 | 0 |
from datetime import datetime as dt
import os
from github import Github
__lowerCamelCase = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 204 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
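
# Added note: this placeholder exists so imports succeed without `onnx`
# installed; instantiating it (or calling from_config / from_pretrained)
# raises the ImportError produced by `requires_backends`.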
| 689 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
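    # Added note: the dummy mask above is a 64x64 float32 array of ones with a
    # zeroed region, paired with a 256x256 RGB init image built from a seeded
    # random tensor.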
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 441 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
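
# Added illustrative sketch (hedged): a typical export flow for this config via
# transformers.onnx; model names and paths here are placeholders, not from this
# file.
#
#   from pathlib import Path
#   from transformers import AutoTokenizer, GPTJModel
#   from transformers.onnx import export
#
#   tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#   model = GPTJModel.from_pretrained("EleutherAI/gpt-j-6B")
#   onnx_config = GPTJOnnxConfig(model.config)
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))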
| 689 | 0 |
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
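
# Added illustrative helper (not in the original module): the Manhattan-distance
# heuristic that the __main__ block below builds cell by cell.
def manhattan_heuristic(y: int, x: int, goal: list[int]) -> int:
    return abs(y - goal[0]) + abs(x - goal[1])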
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 536 |
'''simple docstring'''
def solution(limit: int = 50_000_000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
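
# Added worked example: the smallest such number is 28 = 2**2 + 2**3 + 2**4
# (4 + 8 + 16), so solution(29) returns 1.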
if __name__ == "__main__":
print(f'{solution() = }')
| 689 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.base_model = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
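
# Added note: `projection_state` above has shape
# (batch, seq_len, config.project_dim), i.e. the encoder hidden states mapped
# through the learned linear projection.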
| 636 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
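    # Added note: with output_type="pt" the pipeline returns frames as tensors;
    # the test below switches to "np" and checks a 3x3 corner slice of frame 0.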
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
| 689 | 0 |
"""simple docstring"""
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
imshow(
F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
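
# Added worked example: downscaling 800x600 -> 400x300 gives ratio_x = 2.0, so
# destination column j samples source column int(2.0 * j); upscaling instead
# duplicates source pixels in each direction.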
| 178 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
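
# Added usage sketch (hedged): the decorator is intended for methods of modules
# that may be managed by accelerate hooks, e.g.
#
#   class MyModel(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           ...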
| 689 | 0 |
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
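    # Added usage note: pass exactly one of `audio`/`text` and at most one of
    # `audio_target`/`text_target`; e.g. processor(audio=waveform,
    # sampling_rate=16000, text_target="transcript") returns the feature
    # extractor inputs plus tokenized `labels` and a `decoder_attention_mask`.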
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
| 252 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
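
# Added note: the update above folds each LoRA pair into the base weight as
# W <- W + alpha * (W_up @ W_down); for 4-D conv weights the trailing 1x1
# spatial dims are squeezed before the matmul and restored afterwards.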
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
_a : Optional[int] = parser.parse_args()
_a : Dict = args.base_model_path
_a : Optional[Any] = args.checkpoint_path
_a : Union[str, Any] = args.dump_path
_a : Optional[int] = args.lora_prefix_unet
_a : int = args.lora_prefix_text_encoder
_a : str = args.alpha
_a : Any = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
_a : Tuple = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
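# Example invocation (script filename and paths are hypothetical):
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./my_lora.safetensors \
#       --dump_path ./merged-pipeline \
#       --alpha 0.75 --device cpu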
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """
    RegNet embeddings (stem), composed of a single aggressive convolution.
    """

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
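# Shape sketch for the SE block above (illustrative): a (B, C, H, W) feature map is
# average-pooled to (B, C, 1, 1), squeezed to (B, C_reduced, 1, 1), expanded back to
# (B, C, 1, 1) through a sigmoid, and broadcast-multiplied against the input, e.g.:
#
#   x = torch.randn(2, 64, 7, 7)
#   se = RegNetSELayer(in_channels=64, reduced_channels=16)
#   assert se(x).shape == x.shape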
class RegNetXLayer(nn.Module):
    """
    RegNet's layer composed of three convolutions, the same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """
    A RegNet stage composed of stacked layers.
    """

    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()

        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer

        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config,
                in_channels,
                out_channels,
                stride=stride,
            ),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
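# A minimal inference sketch for the classification model above (the checkpoint name
# comes from the docstrings in this file; `image` is a PIL image you supply):
#
#   from transformers import AutoImageProcessor, RegNetForImageClassification
#   import torch
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])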
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """
    A string can be rearranged into a palindrome iff at most one character occurs an
    odd number of times (spaces and case are ignored).
    """
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2
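# Worked example for the Counter-based check above:
#   "Tact Coa" -> "tactcoa" -> Counter({'t': 2, 'a': 2, 'c': 2, 'o': 1})
#   one odd count (< 2), so it can be rearranged into a palindrome ("tacocat").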
def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """
    Frequency-dict version of the same check: count every character and make sure at
    most one of them occurs an odd number of times.
    """
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark code comparing the two implementations, with and without the built-in Counter."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict

import requests
from slack_sdk import WebClient


client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])


def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
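# Example of the pytest stats line this parser expects (illustrative):
#
#   handle_test_results("= 4 failed, 120 passed in 1:02:03 =")
#
# returns (4, 120, "1:02:03"): counts come from the token right before "failed"/"passed",
# and the trailing "=" makes the parser take the second-to-last token as the time spent.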
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
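# Sketch of the `failures_short` report parsed above (format is illustrative): a line
# matching "_ [doctest]" names the failing file (third space-separated token), and the
# first following line that does not start with a line number is kept as that file's
# one-line error summary.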
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the doc tests
        self.doc_test_results = doc_test_results

        # Set by `post`; `post_reply` requires it
        self.thread_ts = None

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
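    # Example of the conversion above (illustrative): "1:02:03" -> "1h2m3s"; a bare
    # "45.67" is treated as [0, 0, "45.67"] and rendered as "0h0m45s".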
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in self.doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
"""Convert VideoMAE checkpoints from the original repository: https://github.com/MCG-NJU/VideoMAE"""

import argparse
import json

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    VideoMAEConfig,
    VideoMAEForPreTraining,
    VideoMAEForVideoClassification,
    VideoMAEImageProcessor,
)


def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
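# Example renames performed above (illustrative inputs):
#   "blocks.0.norm1.weight"         -> "videomae.encoder.layer.0.layernorm_before.weight"
#   "decoder.blocks.2.mlp.fc2.bias" -> "decoder.decoder_layers.2.output.dense.bias"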
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value weights
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.92_91, -0.40_61, -0.93_07])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.26_71, -0.46_89, -0.82_35])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]]
        )
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]]
        )
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.51_42]) if config.norm_pix_loss else torch.tensor([0.64_69])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]]
        )
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.07_71, 0.00_11, -0.36_25])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.24_33, 0.16_32, -0.48_94])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.65_88, 0.09_90, -0.24_93])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.36_69, -0.06_88, -0.24_21])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]]
        )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.05_37, -0.15_39, -0.32_66])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]]
        )
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.19_61, -0.83_37, -0.63_89])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
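# Example invocation (script filename is hypothetical; the default --checkpoint_url above
# points at the "videomae-base" weights):
#
#   python convert_videomae_to_pytorch.py \
#       --model_name videomae-base \
#       --pytorch_dump_folder_path ./videomae-base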
"""CUAD metric."""

import datasets

from .evaluate import evaluate


_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).

Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
            depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric("cuad")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        predictions = pred_dict
        score = evaluate(dataset=dataset, predictions=predictions)
        return score
"""Perplexity metric."""

import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

import datasets
from datasets import logging


_CITATION = """\
"""

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_KWARGS_DESCRIPTION = """
Args:
    model_id (str): model used for calculating Perplexity
        NOTE: Perplexity can only be calculated for causal language models.
            This includes models such as gpt2, causal variations of bert,
            causal versions of t5, and more (the full list can be found
            in the AutoModelForCausalLM documentation here:
            https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )

    input_texts (list of str): input text, each separate text snippet
        is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts,
        so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available
Returns:
    perplexity: dictionary containing the perplexity scores for the texts
        in the input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, then it is truncated to the
        max length for the perplexity computation.
Examples:
    Example 1:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              add_start_token=False,
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        78.22
        >>> print(round(results["perplexities"][0], 2))
        11.11

    Example 2:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = datasets.load_dataset("wikitext",
        ...                                     "wikitext-2-raw-v1",
        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS
        [...]
        >>> input_texts = [s for s in input_texts if s!='']
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        60.35
        >>> print(round(results["perplexities"][0], 2))
        81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # perplexity = exp(mean NLL over non-padded target tokens)
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
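# Per-sequence quantity computed above (a sketch; `mask_t` is the shifted attention mask
# and `nll_t` the token-level cross-entropy):
#
#   PPL = exp( sum_t(nll_t * mask_t) / sum_t(mask_t) )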