speed_chart = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert a speed value between km/h, m/s, mph and knots."""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 |
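# Usage sketch for convert_speed above (names as defined in this snippet); the
# expected values follow directly from the two conversion charts:
#   convert_speed(100, "km/h", "m/s")  ->  27.778
#   convert_speed(100, "m/s", "km/h")  ->  360.0
#   convert_speed(100, "mph", "km/h")  ->  160.934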
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Approximate the surface distance between two points with Lambert's formula."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute the central angle between the two points using the haversine
    # distance: sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P)cos^2(Q) / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P)sin^2(Q) / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 | 1 |
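# Usage sketch (hypothetical coordinates; the package-relative haversine_distance
# import above means this must be run from inside the package):
#   SAN_FRANCISCO = (37.774856, -122.424227)
#   YOSEMITE = (37.864742, -119.537521)
#   lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE)  # distance in metres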
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (0.1, 0.1, 0.1),
        }

        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """Configuration class for a generic composite encoder-decoder model."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> "EncoderDecoderConfig":
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
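# Usage sketch for EncoderDecoderConfig above: compose any two PretrainedConfig
# objects via the classmethod defined in this class (BertConfig is illustrative):
#   from transformers import BertConfig
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention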
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
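# One concrete instance of the parametrized assertion above, with URL quoting
# applied to the blank in the filename:
#   hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision=None)
#   -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"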
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        FocalNetBackbone,
        FocalNetForImageClassification,
        FocalNetForMaskedImageModeling,
        FocalNetModel,
    )
    from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
def text_justification(word: str, max_width: int) -> list:
    """Justify the words of ``word`` into lines that are exactly ``max_width`` wide."""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in the line,
            # just pad with overall_spaces_count for the remainder of the line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod() | 687 |
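# Usage sketch for text_justification above; every returned line is padded to
# exactly max_width characters:
#   text_justification("This is an example of text justification.", 16)
#   -> ["This    is    an", "example  of text", "justification.  "]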
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()

# expected output slices per checkpoint; the loop below looks them up by the
# "_".join-normalised model id
results = {}
# fmt: off
a = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
a = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
a = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
a = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
a = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
a = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
a = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
a = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
a = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
a = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
a = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
a = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
a = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
a = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
a = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
a = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
a = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith("""CompVis"""):
a = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
a = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
a = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
a = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
a = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''') | 687 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if it is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is already the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list of summary lines."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            " seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            " favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty collections of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
a = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
a = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
a = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            # measure each pair individually to avoid counting inter-sentence errors
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
def perfect(number: int) -> bool:
    """Check whether ``number`` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
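# Worked instances of the proper-divisor sum check above:
#   perfect(6)   -> True   (1 + 2 + 3 == 6)
#   perfect(28)  -> True   (1 + 2 + 4 + 7 + 14 == 28)
#   perfect(12)  -> False  (1 + 2 + 3 + 4 + 6 == 16)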
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class UpperCAmelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCamelCase : Optional[int] = CsvConfig
def SCREAMING_SNAKE_CASE__ ( self: int ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Any ):
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowerCAmelCase :Union[str, Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_UpperCAmelCase , (str, list, tuple) ):
_lowerCAmelCase :str = data_files
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :int = [files]
_lowerCAmelCase :Any = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
_lowerCAmelCase :str = []
for split_name, files in data_files.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :str = [files]
_lowerCAmelCase :int = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={'files': files} ) )
return splits
def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: pa.Table ):
if self.config.features is not None:
_lowerCAmelCase :Optional[int] = self.config.features.arrow_schema
if all(not require_storage_cast(_UpperCAmelCase ) for feature in self.config.features.values() ):
# cheaper cast
_lowerCAmelCase :Dict = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=_UpperCAmelCase )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_lowerCAmelCase :Any = table_cast(_UpperCAmelCase , _UpperCAmelCase )
return pa_table
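# Read each CSV lazily in chunks (pd.read_csv in iterator mode) and yield one
# Arrow table per chunk, keyed by (file index, batch index).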
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: List[Any] ):
_lowerCAmelCase :Optional[int] = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_lowerCAmelCase :str = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(_UpperCAmelCase ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase ) ):
_lowerCAmelCase :Optional[int] = pd.read_csv(_UpperCAmelCase , iterator=_UpperCAmelCase , dtype=_UpperCAmelCase , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(_UpperCAmelCase ):
_lowerCAmelCase :Optional[Any] = pa.Table.from_pandas(_UpperCAmelCase )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_UpperCAmelCase )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(_UpperCAmelCase )}: {e}""" )
raise | 687 |
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: List[Any] , _UpperCAmelCase: int , _UpperCAmelCase: MutableSequence[float] ):
if len(_UpperCAmelCase ) != degree + 1:
raise ValueError(
'The number of coefficients should be equal to the degree + 1.' )
_lowerCAmelCase :list[float] = list(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = degree
def __add__( self: str , _UpperCAmelCase: Polynomial ):
if self.degree > polynomial_a.degree:
_lowerCAmelCase :Any = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , _UpperCAmelCase )
else:
_lowerCAmelCase :List[Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , _UpperCAmelCase )
def __sub__( self: str , _UpperCAmelCase: Polynomial ):
return self + polynomial_a * Polynomial(0 , [-1] )
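# Subtraction reuses __add__: p - q is computed as p + q * (-1), where
# Polynomial(0, [-1]) is the constant polynomial -1.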
def __neg__( self: Union[str, Any] ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self: int , _UpperCAmelCase: Polynomial ):
_lowerCAmelCase :list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: int | float ):
_lowerCAmelCase :int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
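# Example: Polynomial(2, [1, 2, 3]) represents 3x^2 + 2x + 1 (coefficients are
# stored lowest power first), so evaluating it at x = 2 gives 3*4 + 2*2 + 1 = 17.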
def __str__( self: Union[str, Any] ):
_lowerCAmelCase :Dict = ''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0 and polynomial:
polynomial += " + "
elif self.coefficients[i] < 0:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_UpperCAmelCase )
return polynomial
def __repr__( self: Optional[Any] ):
return self.__str__()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :list[float] = [0] * self.degree
for i in range(self.degree ):
_lowerCAmelCase :Tuple = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int | float = 0 ):
_lowerCAmelCase :list[float] = [0] * (self.degree + 2)
_lowerCAmelCase :str = constant
for i in range(self.degree + 1 ):
_lowerCAmelCase :List[str] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , _UpperCAmelCase )
def __eq__( self: List[Any] , _UpperCAmelCase: object ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self: Optional[Any] , _UpperCAmelCase: object ):
return not self.__eq__(_UpperCAmelCase ) | 687 | 1 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = MgpstrTokenizer
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Optional[Any] = {}
lowerCamelCase : List[str] = False
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
super().setUp()
# fmt: off
_lowerCAmelCase :Union[str, Any] = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_lowerCAmelCase :Optional[int] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
_lowerCAmelCase :str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '\n' )
def SCREAMING_SNAKE_CASE__ ( self: int , **_UpperCAmelCase: Optional[int] ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: str ):
_lowerCAmelCase :List[Any] = 'tester'
_lowerCAmelCase :Dict = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :str = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
_lowerCAmelCase :Union[str, Any] = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
_lowerCAmelCase :List[str] = tokenizer.encode([special_token] , add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , 1 )
_lowerCAmelCase :Union[str, Any] = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
_lowerCAmelCase , _lowerCAmelCase :List[Any] = self.get_input_output_texts(_UpperCAmelCase )
_lowerCAmelCase :List[str] = tokenizer.tokenize(_UpperCAmelCase )
_lowerCAmelCase :int = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
_lowerCAmelCase :Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertNotEqual(len(_UpperCAmelCase ) , 0 )
_lowerCAmelCase :List[Any] = tokenizer.decode(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(text_a.replace(' ' , '' ) , _UpperCAmelCase )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def SCREAMING_SNAKE_CASE__ ( self: int ):
pass | 687 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
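# Backend-specific symbols are registered only when the corresponding framework is
# installed; _LazyModule then defers the actual imports until first attribute access.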
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger()
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : nn.Module
lowerCamelCase : List[nn.Module] = field(default_factory=snake_case__ )
lowerCamelCase : list = field(default_factory=snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tensor , _UpperCAmelCase: Tensor ):
_lowerCAmelCase :Optional[int] = len(list(m.modules() ) ) == 1 or isinstance(_UpperCAmelCase , nn.Convad ) or isinstance(_UpperCAmelCase , nn.BatchNormad )
if has_not_submodules:
self.traced.append(_UpperCAmelCase )
def __call__( self: Optional[int] , _UpperCAmelCase: Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_UpperCAmelCase )
[x.remove() for x in self.handles]
return self
@property
def SCREAMING_SNAKE_CASE__ ( self: Any ):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda _UpperCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : nn.Module
lowerCamelCase : nn.Module
lowerCamelCase : int = 1
lowerCamelCase : List = field(default_factory=snake_case__ )
lowerCamelCase : List = field(default_factory=snake_case__ )
lowerCamelCase : bool = True
def __call__( self: str , _UpperCAmelCase: Tensor ):
_lowerCAmelCase :Union[str, Any] = Tracker(self.dest )(_UpperCAmelCase ).parametrized
_lowerCAmelCase :Dict = Tracker(self.src )(_UpperCAmelCase ).parametrized
_lowerCAmelCase :str = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.src_skip , _UpperCAmelCase ) )
_lowerCAmelCase :Any = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.dest_skip , _UpperCAmelCase ) )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ) and self.raise_if_mismatch:
raise Exception(
f"""Numbers of operations are different. Source module has {len(_UpperCAmelCase )} operations while"""
f""" destination module has {len(_UpperCAmelCase )}.""" )
for dest_m, src_m in zip(_UpperCAmelCase , _UpperCAmelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transferred from={src_m} to={dest_m}""" )
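# Wraps a classy-vision RegNet trunk so that its stem and stage blocks can be
# traversed with vissl's get_trunk_forward_outputs helper.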
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
def __init__( self: Optional[int] , _UpperCAmelCase: nn.Module ):
super().__init__()
_lowerCAmelCase :List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(('conv1', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block' ), f"""Unexpected layer name {k}"""
_lowerCAmelCase :int = len(_UpperCAmelCase ) + 1
feature_blocks.append((f"""res{block_index}""", v) )
_lowerCAmelCase :Tuple = nn.ModuleDict(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: Tensor ):
return get_trunk_forward_outputs(
_UpperCAmelCase , out_feat_keys=_UpperCAmelCase , feature_blocks=self._feature_blocks , )
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: str ):
_lowerCAmelCase :Any = x.split('-' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self: Any , _UpperCAmelCase: str ):
# default to timm!
if x not in self:
_lowerCAmelCase :List[str] = self.convert_name_to_timm(_UpperCAmelCase )
_lowerCAmelCase :Dict = partial(lambda: (timm.create_model(_UpperCAmelCase , pretrained=_UpperCAmelCase ).eval(), None) )
else:
_lowerCAmelCase :Any = super().__getitem__(_UpperCAmelCase )
return val
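# seer checkpoints that were not finetuned on imagenet carry no classification head,
# so they map to the bare RegNetModel; all other names use RegNetForImageClassification.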
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __getitem__( self: Optional[Any] , _UpperCAmelCase: str ):
if "seer" in x and "in1k" not in x:
_lowerCAmelCase :Dict = RegNetModel
else:
_lowerCAmelCase :Optional[Any] = RegNetForImageClassification
return val
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : List[Tuple[str, str]] ):
"""simple docstring"""
for from_key, to_key in keys:
_lowerCAmelCase :str = from_state_dict[from_key].clone()
print(f"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Callable[[], nn.Module] , __magic_name__ : Callable[[], nn.Module] , __magic_name__ : RegNetConfig , __magic_name__ : Path , __magic_name__ : bool = True , ):
"""simple docstring"""
print(f"""Converting {name}...""" )
with torch.no_grad():
_lowerCAmelCase , _lowerCAmelCase :List[Any] = from_model_func()
_lowerCAmelCase :Tuple = our_model_func(__magic_name__ ).eval()
_lowerCAmelCase :Dict = ModuleTransfer(src=__magic_name__ , dest=__magic_name__ , raise_if_mismatch=__magic_name__ )
_lowerCAmelCase :List[Any] = torch.randn((1, 3, 224, 224) )
module_transfer(__magic_name__ )
if from_state_dict is not None:
_lowerCAmelCase :str = []
# for seer models finetuned on in1k we have to manually copy the head
if "seer" in name and "in1k" in name:
_lowerCAmelCase :int = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
_lowerCAmelCase :Optional[Any] = manually_copy_vissl_head(__magic_name__ , our_model.state_dict() , __magic_name__ )
our_model.load_state_dict(__magic_name__ )
_lowerCAmelCase :Tuple = our_model(__magic_name__ , output_hidden_states=__magic_name__ )
_lowerCAmelCase :Any = (
our_outputs.logits if isinstance(__magic_name__ , __magic_name__ ) else our_outputs.last_hidden_state
)
_lowerCAmelCase :List[Any] = from_model(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = from_output[-1] if type(__magic_name__ ) is list else from_output
# since we don't load any config files, the vissl seer models don't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
_lowerCAmelCase :Optional[Any] = our_outputs.hidden_states[-1]
assert torch.allclose(__magic_name__ , __magic_name__ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=__magic_name__ , )
_lowerCAmelCase :Optional[int] = 224 if 'seer' not in name else 384
# we can use the convnext one
_lowerCAmelCase :Union[str, Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=__magic_name__ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=__magic_name__ , )
print(f"""Pushed {name}""" )
def UpperCamelCase_( __magic_name__ : Path , __magic_name__ : str = None , __magic_name__ : bool = True ):
"""simple docstring"""
_lowerCAmelCase :int = 'imagenet-1k-id2label.json'
_lowerCAmelCase :Tuple = 1000
_lowerCAmelCase :List[str] = (1, num_labels)
_lowerCAmelCase :Any = 'huggingface/label-files'
_lowerCAmelCase :Dict = num_labels
_lowerCAmelCase :Dict = json.load(open(cached_download(hf_hub_url(__magic_name__ , __magic_name__ , repo_type='dataset' ) ) , 'r' ) )
_lowerCAmelCase :List[Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
_lowerCAmelCase :str = idalabel
_lowerCAmelCase :Tuple = {v: k for k, v in idalabel.items()}
_lowerCAmelCase :int = partial(__magic_name__ , num_labels=__magic_name__ , idalabel=__magic_name__ , labelaid=__magic_name__ )
_lowerCAmelCase :Dict = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
_lowerCAmelCase :Tuple = NameToOurModelFuncMap()
_lowerCAmelCase :Dict = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(__magic_name__ : str , __magic_name__ : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
_lowerCAmelCase :Optional[int] = torch.hub.load_state_dict_from_url(__magic_name__ , model_dir=str(__magic_name__ ) , map_location='cpu' )
_lowerCAmelCase :Any = model_func()
# check if we have a head, if yes add it
_lowerCAmelCase :Tuple = files['classy_state_dict']['base_model']['model']
_lowerCAmelCase :Dict = model_state_dict['trunk']
model.load_state_dict(__magic_name__ )
return model.eval(), model_state_dict["heads"]
# pretrained
_lowerCAmelCase :List[Any] = partial(
__magic_name__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
_lowerCAmelCase :Tuple = partial(
__magic_name__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
_lowerCAmelCase :List[Any] = partial(
__magic_name__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
_lowerCAmelCase :Optional[int] = partial(
__magic_name__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_0=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
_lowerCAmelCase :int = partial(
__magic_name__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
_lowerCAmelCase :Dict = partial(
__magic_name__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
_lowerCAmelCase :List[Any] = partial(
__magic_name__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
_lowerCAmelCase :str = partial(
__magic_name__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_0=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
__magic_name__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , __magic_name__ , __magic_name__ , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
__magic_name__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , __magic_name__ , __magic_name__ , __magic_name__ , )
return config, expected_shape
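# Example invocation (hypothetical script name and paths):
# python convert_regnet_to_pytorch.py --model_name regnet-y-040 --pytorch_dump_folder_path ./converted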
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
a = parser.parse_args()
a = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 687 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
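# Newton-Raphson update rule: x_{n+1} = x_n - f(x_n) / f'(x_n). The derivative is
# obtained symbolically via sympy.diff and both expressions are evaluated with eval().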
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : float | Decimal , __magic_name__ : float = 10**-10 ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = a
while True:
_lowerCAmelCase :str = Decimal(__magic_name__ ) - (
Decimal(eval(__magic_name__ ) ) / Decimal(eval(str(diff(__magic_name__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(__magic_name__ ) ) < precision: # noqa: S307
return float(__magic_name__ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
# Find root of logarithmic function (the root of log(x) - 1 = 0 is e)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''') | 687 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
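# Lightweight configuration for using a timm model as a transformers backbone; it
# only stores metadata, the timm model itself is created by the backbone class.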
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = 'timm_backbone'
def __init__( self: Tuple , _UpperCAmelCase: str=None , _UpperCAmelCase: Optional[Any]=3 , _UpperCAmelCase: List[str]=True , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: str=None , **_UpperCAmelCase: List[str] , ):
super().__init__(**_UpperCAmelCase )
_lowerCAmelCase :List[Any] = backbone
_lowerCAmelCase :Dict = num_channels
_lowerCAmelCase :Union[str, Any] = features_only
_lowerCAmelCase :str = use_pretrained_backbone
_lowerCAmelCase :int = True
_lowerCAmelCase :Tuple = out_indices if out_indices is not None else (-1,) | 687 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
a = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
def UpperCamelCase_( __magic_name__ : Dict ):
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any]=False ):
"""simple docstring"""
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
_lowerCAmelCase :Union[str, Any] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
_lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
_lowerCAmelCase :Optional[int] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
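# The original checkpoints store attention q/k/v as one fused 1x1-conv weight; it is
# split into three chunks below and the trailing conv dimensions are squeezed away.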
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : List[str]=None ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Tuple = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.norm.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.norm.bias"""]
_lowerCAmelCase :Dict = weight_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :str = bias_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[str] = weight_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Optional[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Tuple = weight_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[Any] = bias_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :int = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = torch.load(__magic_name__ , map_location='cpu' )
_lowerCAmelCase :List[Any] = {}
_lowerCAmelCase :List[str] = checkpoint['time_embed.0.weight']
_lowerCAmelCase :Tuple = checkpoint['time_embed.0.bias']
_lowerCAmelCase :Dict = checkpoint['time_embed.2.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_lowerCAmelCase :Union[str, Any] = checkpoint['label_emb.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.bias']
_lowerCAmelCase :List[Any] = unet_config['down_block_types']
_lowerCAmelCase :Any = unet_config['layers_per_block']
_lowerCAmelCase :List[Any] = unet_config['attention_head_dim']
_lowerCAmelCase :Tuple = unet_config['block_out_channels']
_lowerCAmelCase :List[str] = 1
_lowerCAmelCase :Optional[int] = channels_list[0]
for i, layer_type in enumerate(__magic_name__ ):
_lowerCAmelCase :Tuple = channels_list[i]
_lowerCAmelCase :Optional[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :int = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[Any] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :int = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase :List[Any] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :List[str] = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Optional[int] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :List[str] = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :Optional[int] = f"""down_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :str = f"""input_blocks.{current_layer}.1"""
_lowerCAmelCase :Optional[Any] = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Union[str, Any] = f"""down_blocks.{i}.downsamplers.0"""
_lowerCAmelCase :Tuple = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
_lowerCAmelCase :Dict = current_channels
# hardcoded the mid-block for now
_lowerCAmelCase :int = 'mid_block.resnets.0'
_lowerCAmelCase :Optional[Any] = 'middle_block.0'
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Optional[int] = 'mid_block.attentions.0'
_lowerCAmelCase :Optional[int] = 'middle_block.1'
_lowerCAmelCase :List[Any] = convert_attention(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Union[str, Any] = 'mid_block.resnets.1'
_lowerCAmelCase :Optional[int] = 'middle_block.2'
_lowerCAmelCase :int = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = 0
_lowerCAmelCase :str = unet_config['up_block_types']
for i, layer_type in enumerate(__magic_name__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Optional[Any] = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :Any = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Any = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer-1}.1"""
_lowerCAmelCase :Tuple = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Tuple = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[str] = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :str = f"""up_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :List[Any] = f"""output_blocks.{current_layer}.1"""
_lowerCAmelCase :int = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Optional[int] = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :int = f"""output_blocks.{current_layer-1}.2"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :str = checkpoint['out.0.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['out.0.bias']
_lowerCAmelCase :List[Any] = checkpoint['out.2.weight']
_lowerCAmelCase :Dict = checkpoint['out.2.bias']
return new_checkpoint
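# Checkpoint names encode both the UNet variant (imagenet64 / lsun-256 / test) and the
# scheduler type ("cd" = consistency distillation, "ct" = consistency training).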
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
a = parser.parse_args()
a = strabool(args.class_cond)
a = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
a = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
a = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
a = None
a = con_pt_to_diffuser(args.unet_path, unet_config)
a = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
a = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
a = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
a = CMStochasticIterativeScheduler(**scheduler_config)
a = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path) | 687 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
_lowerCAmelCase :Tuple = 1024
_lowerCAmelCase :Dict = 4096
_lowerCAmelCase :List[Any] = 24
_lowerCAmelCase :Optional[Any] = 16
_lowerCAmelCase :Tuple = [5, 11, 17, 23]
_lowerCAmelCase :Optional[int] = [256, 512, 1024, 1024]
_lowerCAmelCase :Any = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
_lowerCAmelCase :Optional[int] = 768
_lowerCAmelCase :Tuple = [1, 1, 1, 0.5]
_lowerCAmelCase :Union[str, Any] = [256, 512, 768, 768]
_lowerCAmelCase :Any = 150
_lowerCAmelCase :Optional[int] = 16
_lowerCAmelCase :Union[str, Any] = (1, 384, 384)
_lowerCAmelCase :Optional[Any] = False
_lowerCAmelCase :int = 'project'
if "ade" in checkpoint_url:
_lowerCAmelCase :Tuple = True
_lowerCAmelCase :str = 768
_lowerCAmelCase :Tuple = [1, 1, 1, 0.5]
_lowerCAmelCase :str = 150
_lowerCAmelCase :int = 16
_lowerCAmelCase :str = 'huggingface/label-files'
_lowerCAmelCase :List[Any] = 'ade20k-id2label.json'
_lowerCAmelCase :List[str] = json.load(open(cached_download(hf_hub_url(__magic_name__ , __magic_name__ , repo_type='dataset' ) ) , 'r' ) )
_lowerCAmelCase :Dict = {int(__magic_name__ ): v for k, v in idalabel.items()}
_lowerCAmelCase :Optional[Any] = idalabel
_lowerCAmelCase :str = {v: k for k, v in idalabel.items()}
_lowerCAmelCase :Tuple = [1, 150, 480, 480]
return config, expected_shape
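# Keys from the original checkpoint with no counterpart in the HF model (the
# pretrained backbone's classification head) are dropped before renaming.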
def UpperCamelCase_( __magic_name__ : Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase :Tuple = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def UpperCamelCase_( __magic_name__ : Union[str, Any] ):
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowerCAmelCase :Optional[int] = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
_lowerCAmelCase :List[Any] = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
_lowerCAmelCase :int = name.replace('patch_embed' , '' )
if "pos_embed" in name:
_lowerCAmelCase :Dict = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
_lowerCAmelCase :int = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
_lowerCAmelCase :Union[str, Any] = name.replace('proj' , 'projection' )
if "blocks" in name:
_lowerCAmelCase :Tuple = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
_lowerCAmelCase :Any = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowerCAmelCase :Dict = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
_lowerCAmelCase :Dict = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
_lowerCAmelCase :Dict = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
_lowerCAmelCase :Union[str, Any] = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
_lowerCAmelCase :str = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
_lowerCAmelCase :str = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
_lowerCAmelCase :Union[str, Any] = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
_lowerCAmelCase :Tuple = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
_lowerCAmelCase :Any = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
_lowerCAmelCase :Tuple = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowerCAmelCase :str = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
_lowerCAmelCase :Dict = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_lowerCAmelCase :Optional[int] = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_lowerCAmelCase :Any = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_lowerCAmelCase :Any = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_lowerCAmelCase :str = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowerCAmelCase :List[Any] = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowerCAmelCase :List[Any] = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowerCAmelCase :Dict = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowerCAmelCase :int = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowerCAmelCase :List[str] = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_lowerCAmelCase :Optional[int] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_lowerCAmelCase :List[str] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_lowerCAmelCase :int = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_lowerCAmelCase :Tuple = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_lowerCAmelCase :Any = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_lowerCAmelCase :Union[str, Any] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_lowerCAmelCase :int = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_lowerCAmelCase :List[str] = name.replace('bn' , 'batch_norm' )
if "head" in name:
_lowerCAmelCase :Union[str, Any] = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_lowerCAmelCase :int = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_lowerCAmelCase :Tuple = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_lowerCAmelCase :List[Any] = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_lowerCAmelCase :Optional[Any] = name.replace('..' , '.' )
if "stem.conv" in name:
_lowerCAmelCase :Any = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_lowerCAmelCase :int = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_lowerCAmelCase :List[Any] = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_lowerCAmelCase :Tuple = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_lowerCAmelCase :Optional[int] = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_lowerCAmelCase :Union[str, Any] = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_lowerCAmelCase :List[Any] = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
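# timm keeps query/key/value as a single fused qkv matrix per layer; split it into
# the separate q/k/v projections expected by the HF DPT implementation.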
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : List[Any] ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase :Union[str, Any] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
_lowerCAmelCase :List[str] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase :Optional[int] = in_proj_weight[: config.hidden_size, :]
_lowerCAmelCase :List[Any] = in_proj_bias[: config.hidden_size]
_lowerCAmelCase :Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase :Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase :int = in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase :Optional[Any] = in_proj_bias[-config.hidden_size :]
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCAmelCase :Any = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : Optional[int] ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase :str = get_dpt_config(__magic_name__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_lowerCAmelCase :int = torch.load(__magic_name__ , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(__magic_name__ )
# rename keys
for key in state_dict.copy().keys():
_lowerCAmelCase :List[Any] = state_dict.pop(__magic_name__ )
_lowerCAmelCase :int = val
# read in qkv matrices
read_in_q_k_v(__magic_name__ , __magic_name__ )
# load HuggingFace model
_lowerCAmelCase :List[str] = DPTForSemanticSegmentation(__magic_name__ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
# Check outputs on an image
_lowerCAmelCase :List[str] = 480 if 'ade' in checkpoint_url else 384
_lowerCAmelCase :int = DPTImageProcessor(size=__magic_name__ )
_lowerCAmelCase :Tuple = prepare_img()
_lowerCAmelCase :List[str] = image_processor(__magic_name__ , return_tensors='pt' )
# forward pass
_lowerCAmelCase :Union[str, Any] = model(**__magic_name__ ).logits if 'ade' in checkpoint_url else model(**__magic_name__ ).predicted_depth
if show_prediction:
_lowerCAmelCase :Tuple = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=__magic_name__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__magic_name__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
a = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
) | 687 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
a = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Optional[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
_lowerCAmelCase :Tuple = self.diffusers_dir
shutil.copy(
os.path.join(_UpperCAmelCase , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :str = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=None ):
_lowerCAmelCase :int = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_lowerCAmelCase :Dict = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_lowerCAmelCase :Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_lowerCAmelCase :List[str] = black.format_str(_UpperCAmelCase , mode=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(_UpperCAmelCase , 'w' , newline='\n' ) as f:
f.write(_UpperCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_UpperCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_UpperCAmelCase )
with open(_UpperCAmelCase , 'r' ) as f:
self.assertTrue(f.read() , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :List[str] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
# Base copy consistency
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , _UpperCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , )
# Copy consistency with a really long name
_lowerCAmelCase :Optional[int] = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , _UpperCAmelCase , _UpperCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , _UpperCAmelCase , overwrite_result=re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , ) | 687 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
a = """"""
a = """"""
a = """"""
a = 1 # (0 is vertical, 1 is horizontal)
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = get_dataset(__magic_name__ , __magic_name__ )
print('Processing...' )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = update_image_and_anno(__magic_name__ , __magic_name__ , __magic_name__ )
for index, image in enumerate(__magic_name__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase :Optional[Any] = random_chars(32 )
_lowerCAmelCase :str = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
_lowerCAmelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""{file_root}.jpg""" , __magic_name__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(__magic_name__ )} with {file_name}""" )
_lowerCAmelCase :str = []
for anno in new_annos[index]:
_lowerCAmelCase :List[str] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(__magic_name__ )
with open(f"""/{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :int = []
_lowerCAmelCase :Union[str, Any] = []
for label_file in glob.glob(os.path.join(__magic_name__ , '*.txt' ) ):
_lowerCAmelCase :Optional[int] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(__magic_name__ ) as in_file:
_lowerCAmelCase :Union[str, Any] = in_file.readlines()
_lowerCAmelCase :List[Any] = os.path.join(__magic_name__ , f"""{label_name}.jpg""" )
_lowerCAmelCase :Tuple = []
for obj_list in obj_lists:
_lowerCAmelCase :Union[str, Any] = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__magic_name__ )
labels.append(__magic_name__ )
return img_paths, labels
def UpperCamelCase_( __magic_name__ : list , __magic_name__ : list , __magic_name__ : int = 1 ):
"""simple docstring"""
_lowerCAmelCase :str = []
_lowerCAmelCase :Any = []
_lowerCAmelCase :Optional[Any] = []
for idx in range(len(__magic_name__ ) ):
_lowerCAmelCase :Optional[int] = []
_lowerCAmelCase :Optional[Any] = img_list[idx]
path_list.append(__magic_name__ )
_lowerCAmelCase :List[str] = anno_list[idx]
_lowerCAmelCase :Optional[Any] = cva.imread(__magic_name__ )
if flip_type == 1:
_lowerCAmelCase :List[Any] = cva.flip(__magic_name__ , __magic_name__ )
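            # OpenCV flip code 1 mirrors the image horizontally, so the
            # normalized YOLO x-center becomes 1 - x; the vertical branch below
            # mirrors the y-center analogously while widths and heights are kept.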
for bbox in img_annos:
_lowerCAmelCase :List[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_lowerCAmelCase :List[str] = cva.flip(__magic_name__ , __magic_name__ )
for bbox in img_annos:
_lowerCAmelCase :List[str] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__magic_name__ )
new_imgs_list.append(__magic_name__ )
return new_imgs_list, new_annos_lists, path_list
def UpperCamelCase_( __magic_name__ : int = 32 ):
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase :str = ascii_lowercase + digits
return "".join(random.choice(__magic_name__ ) for _ in range(__magic_name__ ) )
if __name__ == "__main__":
main()
print("""DONE ✅""") | 687 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase : Optional[str] = field(
default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCamelCase : Optional[int] = field(
default=1_00_00 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate for training.'} )
    lowerCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate scheduler type.'} )
lowerCamelCase : Optional[int] = field(
default=7_50 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase : Optional[int] = field(
default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase : Optional[int] = field(default=5_00_00 , metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCamelCase : Optional[int] = field(
default=10_24 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase : Optional[int] = field(default=2_56 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase : Optional[int] = field(
default=2_00 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase : Optional[str] = field(
        default='eval_results.json' , metadata={'help': 'Name of the file to save evaluation results to.'} )
lowerCamelCase : Optional[str] = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase : Optional[str] = field(
        default='codeparrot-clean' , metadata={'help': 'Folder to save the processed dataset.'} )
lowerCamelCase : Optional[int] = field(
default=10_00_00 , metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[float] = field(
default=10_00 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1_00 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase : Optional[float] = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[int] = field(default=20_00_00 , metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCamelCase : Optional[int] = field(
        default=3_27_68 , metadata={'help': 'Vocabulary size of the new tokenizer.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase : Optional[str] = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
    lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for pretokenization.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
    lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved model to the hub.'} )
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
a = numpy.array([0, 0])
a = numpy.array([0.5, 0.8_6_6_0_2_5_4])
a = numpy.array([1, 0])
a = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCamelCase_( __magic_name__ : list[numpy.ndarray] , __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :str = initial_vectors
for _ in range(__magic_name__ ):
_lowerCAmelCase :Tuple = iteration_step(__magic_name__ )
return vectors
def UpperCamelCase_( __magic_name__ : list[numpy.ndarray] ):
"""simple docstring"""
_lowerCAmelCase :List[str] = []
for i, start_vector in enumerate(vectors[:-1] ):
_lowerCAmelCase :Optional[Any] = vectors[i + 1]
new_vectors.append(__magic_name__ )
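        # Koch construction: each segment is replaced by four. Keep the first
        # third, add the tip of an equilateral triangle by rotating the middle
        # third by 60 degrees, then resume at the two-thirds point.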
_lowerCAmelCase :List[str] = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def UpperCamelCase_( __magic_name__ : numpy.ndarray , __magic_name__ : float ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = numpy.radians(__magic_name__ )
_lowerCAmelCase , _lowerCAmelCase :Dict = numpy.cos(__magic_name__ ), numpy.sin(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = numpy.array(((c, -s), (s, c)) )
return numpy.dot(__magic_name__ , __magic_name__ )
def UpperCamelCase_( __magic_name__ : list[numpy.ndarray] ):
"""simple docstring"""
_lowerCAmelCase :Dict = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_lowerCAmelCase , _lowerCAmelCase :str = zip(*__magic_name__ )
plt.plot(__magic_name__ , __magic_name__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
a = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors) | 687 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :List[str] = 'ylacombe/bark-small'
_lowerCAmelCase :int = tempfile.mkdtemp()
_lowerCAmelCase :List[str] = 'en_speaker_1'
_lowerCAmelCase :Union[str, Any] = 'This is a test string'
_lowerCAmelCase :List[Any] = 'speaker_embeddings_path.json'
_lowerCAmelCase :str = 'speaker_embeddings'
def SCREAMING_SNAKE_CASE__ ( self: str , **_UpperCAmelCase: Optional[Any] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :List[Any] = self.get_tokenizer()
_lowerCAmelCase :List[str] = BarkProcessor(tokenizer=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase :Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCAmelCase :Any = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase :List[Any] = 35
_lowerCAmelCase :Optional[int] = 2
_lowerCAmelCase :Dict = 8
_lowerCAmelCase :Dict = {
'semantic_prompt': np.ones(_UpperCAmelCase ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase :int = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase :Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Tuple = self.get_tokenizer()
_lowerCAmelCase :Union[str, Any] = BarkProcessor(tokenizer=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = processor(text=self.input_string )
_lowerCAmelCase :List[str] = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() ) | 687 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any]=False ):
"""simple docstring"""
_lowerCAmelCase :List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCAmelCase :Any = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Tuple=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCAmelCase :Dict = ''
else:
_lowerCAmelCase :Optional[int] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase :List[str] = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
_lowerCAmelCase :int = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase :Optional[int] = in_proj_weight[
: config.hidden_size, :
]
_lowerCAmelCase :List[str] = in_proj_bias[: config.hidden_size]
_lowerCAmelCase :List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase :Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase :str = in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase :Optional[int] = in_proj_bias[-config.hidden_size :]
def UpperCamelCase_( __magic_name__ : List[Any] ):
"""simple docstring"""
_lowerCAmelCase :List[str] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def UpperCamelCase_( __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Dict ):
"""simple docstring"""
_lowerCAmelCase :List[str] = dct.pop(__magic_name__ )
_lowerCAmelCase :Optional[int] = val
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :str = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCAmelCase :int = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase_( __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any]=True ):
"""simple docstring"""
_lowerCAmelCase :Any = ViTConfig()
# patch_size
if model_name[-1] == "8":
_lowerCAmelCase :int = 8
# set labels if required
if not base_model:
_lowerCAmelCase :List[str] = 1000
_lowerCAmelCase :Union[str, Any] = 'huggingface/label-files'
_lowerCAmelCase :List[str] = 'imagenet-1k-id2label.json'
_lowerCAmelCase :Union[str, Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type='dataset' ) , 'r' ) )
_lowerCAmelCase :List[Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
_lowerCAmelCase :List[Any] = idalabel
_lowerCAmelCase :Tuple = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_lowerCAmelCase :Union[str, Any] = 384
_lowerCAmelCase :Dict = 1536
_lowerCAmelCase :Optional[Any] = 12
_lowerCAmelCase :str = 6
# load original model from torch hub
_lowerCAmelCase :int = torch.hub.load('facebookresearch/dino:main' , __magic_name__ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCAmelCase :Optional[int] = original_model.state_dict()
if base_model:
remove_classification_head_(__magic_name__ )
_lowerCAmelCase :Optional[int] = create_rename_keys(__magic_name__ , base_model=__magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , __magic_name__ )
# load HuggingFace model
if base_model:
_lowerCAmelCase :Optional[Any] = ViTModel(__magic_name__ , add_pooling_layer=__magic_name__ ).eval()
else:
_lowerCAmelCase :Union[str, Any] = ViTForImageClassification(__magic_name__ ).eval()
model.load_state_dict(__magic_name__ )
# Check outputs on an image, prepared by ViTImageProcessor
_lowerCAmelCase :Any = ViTImageProcessor()
_lowerCAmelCase :int = image_processor(images=prepare_img() , return_tensors='pt' )
_lowerCAmelCase :List[Any] = encoding['pixel_values']
_lowerCAmelCase :List[str] = model(__magic_name__ )
if base_model:
_lowerCAmelCase :Dict = original_model(__magic_name__ )
assert torch.allclose(__magic_name__ , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
else:
_lowerCAmelCase :str = original_model(__magic_name__ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__magic_name__ , outputs.logits , atol=1e-3 )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__magic_name__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
a = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model) | 687 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : int = 'bert'
def __init__( self: Optional[Any] , _UpperCAmelCase: Tuple=3_0522 , _UpperCAmelCase: int=768 , _UpperCAmelCase: Union[str, Any]=12 , _UpperCAmelCase: Dict=12 , _UpperCAmelCase: List[Any]=3072 , _UpperCAmelCase: List[Any]="gelu" , _UpperCAmelCase: Union[str, Any]=0.1 , _UpperCAmelCase: Dict=0.1 , _UpperCAmelCase: List[Any]=512 , _UpperCAmelCase: Optional[Any]=2 , _UpperCAmelCase: Optional[int]=0.0_2 , _UpperCAmelCase: Any=1e-1_2 , _UpperCAmelCase: Optional[Any]=0 , _UpperCAmelCase: Union[str, Any]="absolute" , _UpperCAmelCase: Dict=True , _UpperCAmelCase: Optional[Any]=None , **_UpperCAmelCase: Optional[int] , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :List[Any] = vocab_size
_lowerCAmelCase :Tuple = hidden_size
_lowerCAmelCase :Dict = num_hidden_layers
_lowerCAmelCase :Optional[Any] = num_attention_heads
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :int = intermediate_size
_lowerCAmelCase :Tuple = hidden_dropout_prob
_lowerCAmelCase :Tuple = attention_probs_dropout_prob
_lowerCAmelCase :List[Any] = max_position_embeddings
_lowerCAmelCase :Dict = type_vocab_size
_lowerCAmelCase :Any = initializer_range
_lowerCAmelCase :int = layer_norm_eps
_lowerCAmelCase :List[Any] = position_embedding_type
_lowerCAmelCase :int = use_cache
_lowerCAmelCase :Union[str, Any] = classifier_dropout
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
if self.task == "multiple-choice":
_lowerCAmelCase :List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase :Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] ) | 687 | 1 |
from typing import Any
def UpperCamelCase_( __magic_name__ : list ):
"""simple docstring"""
if not input_list:
return []
_lowerCAmelCase :str = [input_list.count(__magic_name__ ) for value in input_list]
_lowerCAmelCase :List[Any] = max(__magic_name__ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(__magic_name__ ) if value == y} )
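# Intended examples for the mode function above: [2, 2, 3] -> [2] (single
# mode); [1, 1, 2, 2] -> [1, 2] (every most-frequent value, sorted).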
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ):
"""simple docstring"""
if isinstance(__magic_name__ , torch.Tensor ):
return image
elif isinstance(__magic_name__ , PIL.Image.Image ):
_lowerCAmelCase :Tuple = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCAmelCase :List[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowerCAmelCase :Optional[Any] = np.concatenate(__magic_name__ , axis=0 )
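            # scale pixel values to [0, 1], reorder NHWC -> NCHW, then map to
            # [-1, 1] as expected by the VAE encoder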
_lowerCAmelCase :Any = np.array(__magic_name__ ).astype(np.floataa ) / 255.0
_lowerCAmelCase :Optional[int] = image.transpose(0 , 3 , 1 , 2 )
_lowerCAmelCase :int = 2.0 * image - 1.0
_lowerCAmelCase :Optional[int] = torch.from_numpy(__magic_name__ )
elif isinstance(image[0] , torch.Tensor ):
_lowerCAmelCase :str = torch.cat(__magic_name__ , dim=0 )
return image
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : int=0.9995 ):
"""simple docstring"""
if not isinstance(__magic_name__ , np.ndarray ):
_lowerCAmelCase :Tuple = True
_lowerCAmelCase :str = va.device
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :Any = np.sum(va * va / (np.linalg.norm(__magic_name__ ) * np.linalg.norm(__magic_name__ )) )
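    # if the inputs are nearly collinear, spherical interpolation degenerates,
    # so fall back to plain linear interpolation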
if np.abs(__magic_name__ ) > DOT_THRESHOLD:
_lowerCAmelCase :Optional[Any] = (1 - t) * va + t * va
else:
_lowerCAmelCase :int = np.arccos(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = np.sin(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = theta_a * t
_lowerCAmelCase :str = np.sin(__magic_name__ )
_lowerCAmelCase :Any = np.sin(theta_a - theta_t ) / sin_theta_a
_lowerCAmelCase :Optional[Any] = sin_theta_t / sin_theta_a
_lowerCAmelCase :List[Any] = sa * va + sa * va
if inputs_are_torch:
_lowerCAmelCase :int = torch.from_numpy(__magic_name__ ).to(__magic_name__ )
return va
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Any = F.normalize(__magic_name__ , dim=-1 )
_lowerCAmelCase :str = F.normalize(__magic_name__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
for param in model.parameters():
_lowerCAmelCase :List[str] = value
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: Any , _UpperCAmelCase: AutoencoderKL , _UpperCAmelCase: CLIPTextModel , _UpperCAmelCase: CLIPModel , _UpperCAmelCase: CLIPTokenizer , _UpperCAmelCase: UNetaDConditionModel , _UpperCAmelCase: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _UpperCAmelCase: CLIPFeatureExtractor , _UpperCAmelCase: str=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Union[str, Any]=None , ):
super().__init__()
self.register_modules(
vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , clip_model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , coca_model=_UpperCAmelCase , coca_tokenizer=_UpperCAmelCase , coca_transform=_UpperCAmelCase , )
_lowerCAmelCase :int = (
feature_extractor.size
if isinstance(feature_extractor.size , _UpperCAmelCase )
else feature_extractor.size['shortest_edge']
)
_lowerCAmelCase :Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _UpperCAmelCase )
set_requires_grad(self.clip_model , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase :Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
self.enable_attention_slicing(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Dict ):
# get the original timestep using init_timestep
_lowerCAmelCase :Optional[Any] = min(int(num_inference_steps * strength ) , _UpperCAmelCase )
_lowerCAmelCase :List[str] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase :Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Union[str, Any]=None ):
if not isinstance(_UpperCAmelCase , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(_UpperCAmelCase )}""" )
_lowerCAmelCase :Union[str, Any] = image.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :List[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCAmelCase )
]
_lowerCAmelCase :List[str] = torch.cat(_UpperCAmelCase , dim=0 )
else:
_lowerCAmelCase :List[str] = self.vae.encode(_UpperCAmelCase ).latent_dist.sample(_UpperCAmelCase )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :List[Any] = 0.1_8_2_1_5 * init_latents
_lowerCAmelCase :List[Any] = init_latents.repeat_interleave(_UpperCAmelCase , dim=0 )
_lowerCAmelCase :Dict = randn_tensor(init_latents.shape , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
# get latents
_lowerCAmelCase :Dict = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :List[str] = init_latents
return latents
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, Any] ):
_lowerCAmelCase :Optional[int] = self.coca_transform(_UpperCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCAmelCase :Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_lowerCAmelCase :int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] ):
_lowerCAmelCase :Optional[int] = self.feature_extractor.preprocess(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
_lowerCAmelCase :List[str] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Dict = image_embeddings_clip.repeat_interleave(_UpperCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , _UpperCAmelCase: Dict , _UpperCAmelCase: str , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , ):
_lowerCAmelCase :Dict = latents.detach().requires_grad_()
_lowerCAmelCase :Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowerCAmelCase :int = self.scheduler.alphas_cumprod[timestep]
_lowerCAmelCase :Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase :str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCAmelCase :Optional[Any] = torch.sqrt(_UpperCAmelCase )
_lowerCAmelCase :List[str] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Dict = self.scheduler.sigmas[index]
_lowerCAmelCase :Optional[Any] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :Tuple = 1 / 0.1_8_2_1_5 * sample
_lowerCAmelCase :Optional[Any] = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Tuple = transforms.Resize(self.feature_extractor_size )(_UpperCAmelCase )
_lowerCAmelCase :Tuple = self.normalize(_UpperCAmelCase ).to(latents.dtype )
_lowerCAmelCase :List[Any] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Tuple = spherical_dist_loss(_UpperCAmelCase , _UpperCAmelCase ).mean() * clip_guidance_scale
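        # gradient of the CLIP guidance loss w.r.t. the latents, negated so it
        # can be applied as a descent direction in the updates below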
_lowerCAmelCase :str = -torch.autograd.grad(_UpperCAmelCase , _UpperCAmelCase )[0]
if isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Union[str, Any] = latents.detach() + grads * (sigma**2)
_lowerCAmelCase :Dict = noise_pred_original
else:
_lowerCAmelCase :Optional[int] = noise_pred_original - torch.sqrt(_UpperCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self: Optional[int] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: float = 0.6 , _UpperCAmelCase: Optional[int] = 50 , _UpperCAmelCase: Optional[float] = 7.5 , _UpperCAmelCase: Optional[int] = 1 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Optional[float] = 100 , _UpperCAmelCase: Optional[torch.Generator] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , _UpperCAmelCase: float = 0.8 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(_UpperCAmelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(_UpperCAmelCase , torch.Generator ) and batch_size > 1:
_lowerCAmelCase :int = [generator] + [None] * (batch_size - 1)
_lowerCAmelCase :List[Any] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowerCAmelCase :Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowerCAmelCase :List[str] = ', '.join(_UpperCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :List[Any] = self.get_image_description(_UpperCAmelCase )
if style_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :Any = self.get_image_description(_UpperCAmelCase )
# get prompt text embeddings for content and style
_lowerCAmelCase :Any = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :int = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :List[str] = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase :str = text_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# set timesteps
_lowerCAmelCase :Any = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowerCAmelCase :Dict = {}
if accepts_offset:
_lowerCAmelCase :Optional[int] = 1
self.scheduler.set_timesteps(_UpperCAmelCase , **_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , self.device )
_lowerCAmelCase :int = timesteps[:1].repeat(_UpperCAmelCase )
# Preprocess image
_lowerCAmelCase :Dict = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :int = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :Any = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :str = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if clip_guidance_scale > 0:
_lowerCAmelCase :Optional[Any] = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Any = slerp(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase :int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase :Optional[int] = content_text_input.input_ids.shape[-1]
_lowerCAmelCase :Union[str, Any] = self.tokenizer([''] , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='pt' )
_lowerCAmelCase :Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCAmelCase :Optional[int] = uncond_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase :int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase :Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase :Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCAmelCase :Any = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to(
self.device )
else:
_lowerCAmelCase :List[Any] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCAmelCase :int = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase :Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase :Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase :Any = {}
if accepts_eta:
_lowerCAmelCase :Any = eta
# check if the scheduler accepts generator
_lowerCAmelCase :List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowerCAmelCase :List[Any] = generator
with self.progress_bar(total=_UpperCAmelCase ):
for i, t in enumerate(_UpperCAmelCase ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase :Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase :Tuple = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase :List[str] = noise_pred.chunk(2 )
_lowerCAmelCase :Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCAmelCase :List[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.cond_fn(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase :str = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :str = 1 / 0.1_8_2_1_5 * latents
_lowerCAmelCase :Any = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[str] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase :List[Any] = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase ) | 687 | 1 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase :str = 'pt'
_lowerCAmelCase :Optional[Any] = 'tf'
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: List[Any] ):
_lowerCAmelCase :List[str] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Tuple ):
_lowerCAmelCase :int = TFAutoModel.from_pretrained(self.test_model , from_pt=_UpperCAmelCase )
model_tf.save_pretrained(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Union[str, Any] = 'mock_framework'
# Framework provided - return whatever the user provides
_lowerCAmelCase :Tuple = FeaturesManager.determine_framework(self.test_model , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_UpperCAmelCase )
_lowerCAmelCase :List[str] = FeaturesManager.determine_framework(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = FeaturesManager.determine_framework(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_UpperCAmelCase )
_lowerCAmelCase :Any = FeaturesManager.determine_framework(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = FeaturesManager.determine_framework(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_UpperCAmelCase ):
_lowerCAmelCase :str = FeaturesManager.determine_framework(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
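        # TensorFlow not in environment -> use PyTorch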
_lowerCAmelCase :Tuple = MagicMock(return_value=_UpperCAmelCase )
with patch('transformers.onnx.features.is_tf_available' , _UpperCAmelCase ):
_lowerCAmelCase :str = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_UpperCAmelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase :Any = MagicMock(return_value=_UpperCAmelCase )
with patch('transformers.onnx.features.is_torch_available' , _UpperCAmelCase ):
_lowerCAmelCase :Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_UpperCAmelCase , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase :Dict = MagicMock(return_value=_UpperCAmelCase )
_lowerCAmelCase :List[str] = MagicMock(return_value=_UpperCAmelCase )
with patch('transformers.onnx.features.is_tf_available' , _UpperCAmelCase ), patch(
'transformers.onnx.features.is_torch_available' , _UpperCAmelCase ):
_lowerCAmelCase :int = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_UpperCAmelCase , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase :Tuple = MagicMock(return_value=_UpperCAmelCase )
_lowerCAmelCase :Dict = MagicMock(return_value=_UpperCAmelCase )
with patch('transformers.onnx.features.is_tf_available' , _UpperCAmelCase ), patch(
'transformers.onnx.features.is_torch_available' , _UpperCAmelCase ):
with self.assertRaises(_UpperCAmelCase ):
_lowerCAmelCase :Union[str, Any] = FeaturesManager.determine_framework(self.test_model ) | 687 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = list(__magic_name__ )
_lowerCAmelCase :Dict = list(__magic_name__ )
_lowerCAmelCase :Any = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != lista[i]:
count += 1
_lowerCAmelCase :Union[str, Any] = '_'
if count > 1:
return False
else:
return "".join(__magic_name__ )
def UpperCamelCase_( __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :int = []
while True:
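        # Quine-McCluskey merge step: combine every pair of implicants that
        # differ in exactly one bit; implicants that never merge in a pass
        # are collected as prime implicants.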
_lowerCAmelCase :str = ['$'] * len(__magic_name__ )
_lowerCAmelCase :Optional[int] = []
for i in range(len(__magic_name__ ) ):
for j in range(i + 1 , len(__magic_name__ ) ):
_lowerCAmelCase :int = compare_string(binary[i] , binary[j] )
if k is False:
_lowerCAmelCase :str = '*'
_lowerCAmelCase :Union[str, Any] = '*'
temp.append('X' )
for i in range(len(__magic_name__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__magic_name__ ) == 0:
return pi
_lowerCAmelCase :Any = list(set(__magic_name__ ) )
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : Sequence[float] ):
"""simple docstring"""
_lowerCAmelCase :str = []
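    # emit bits from the least significant end, prepending each one so the
    # final string is MSB-first with a fixed width of one bit per variable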
for minterm in minterms:
_lowerCAmelCase :Any = ''
for _ in range(__magic_name__ ):
_lowerCAmelCase :Tuple = str(minterm % 2 ) + string
minterm //= 2
temp.append(__magic_name__ )
return temp
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = list(__magic_name__ )
_lowerCAmelCase :List[Any] = list(__magic_name__ )
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCamelCase_( __magic_name__ : list[list[int]] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = []
_lowerCAmelCase :List[str] = [0] * len(__magic_name__ )
for i in range(len(chart[0] ) ):
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Optional[Any] = -1
for j in range(len(__magic_name__ ) ):
if chart[j][i] == 1:
count += 1
_lowerCAmelCase :List[Any] = j
if count == 1:
_lowerCAmelCase :Dict = 1
for i in range(len(__magic_name__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__magic_name__ ) ):
_lowerCAmelCase :Dict = 0
temp.append(prime_implicants[i] )
while True:
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Any = -1
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = chart[i].count(1 )
if count_n > max_n:
_lowerCAmelCase :Optional[Any] = count_n
_lowerCAmelCase :Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = 0
def UpperCamelCase_( __magic_name__ : list[str] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = [[0 for x in range(len(__magic_name__ ) )] for x in range(len(__magic_name__ ) )]
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :Tuple = prime_implicants[i].count('_' )
for j in range(len(__magic_name__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , __magic_name__ ):
_lowerCAmelCase :str = 1
return chart
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Tuple = int(input('Enter the no. of variables\n' ) )
_lowerCAmelCase :Tuple = [
float(__magic_name__ )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
_lowerCAmelCase :List[str] = decimal_to_binary(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Any = check(__magic_name__ )
print('Prime Implicants are:' )
print(__magic_name__ )
_lowerCAmelCase :List[Any] = prime_implicant_chart(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = selection(__magic_name__ , __magic_name__ )
print('Essential Prime Implicants are:' )
print(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 687 | 1 |
a = """
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
a = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
a = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
} | 687 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
    booktitle = {Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
a = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
a = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[int]=4 , _UpperCAmelCase: Optional[int]=False ):
_lowerCAmelCase :Any = compute_bleu(
reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) :Tuple = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
} | 687 | 1 |
import os
from math import logaa
def UpperCamelCase_( __magic_name__ : str = "base_exp.txt" ):
"""simple docstring"""
_lowerCAmelCase :float = 0
_lowerCAmelCase :int = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , __magic_name__ ) ) ):
        _lowerCAmelCase , _lowerCAmelCase :Optional[Any] = list(map(int , line.split(',' ) ) )
if x * logaa(__magic_name__ ) > largest:
_lowerCAmelCase :int = x * logaa(__magic_name__ )
_lowerCAmelCase :Optional[Any] = i + 1
return result
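# Hedged sanity check of the logarithm trick above: log10 is monotonic, so
# x * log10(base) orders base**x without materializing the huge integers.
assert 2 * logaa(10 ) > 3 * logaa(2 ) # 10**2 = 100 > 2**3 = 8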
if __name__ == "__main__":
print(solution()) | 687 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
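# Hedged note: _LazyModule (used at the bottom of this file) defers these heavy
# imports until an attribute is first accessed, so importing the package stays
# cheap even when the torch models are registered here.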
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 | 1 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[str] = GPTaTokenizer
lowerCamelCase : Dict = GPTaTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : Optional[int] = {'add_prefix_space': True}
lowerCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase :int = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
_lowerCAmelCase :int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
_lowerCAmelCase :List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
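        # Editorial note: each merges line is a BPE merge pair in rank order,
        # e.g. '\u0120l o' fuses '\u0120l' + 'o' -> '\u0120lo'; '\u0120' is the
        # byte-level stand-in for a leading space in GPT-2's vocabulary.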
_lowerCAmelCase :Optional[int] = {'unk_token': '<unk>'}
_lowerCAmelCase :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase :Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( self: Any , **_UpperCAmelCase: Tuple ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , **_UpperCAmelCase: str ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Tuple ):
_lowerCAmelCase :Any = 'lower newer'
_lowerCAmelCase :str = 'lower newer'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Optional[int] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase :Optional[Any] = 'lower newer'
_lowerCAmelCase :Union[str, Any] = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
_lowerCAmelCase :Dict = tokenizer.tokenize(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[int] = tokens + [tokenizer.unk_token]
_lowerCAmelCase :List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase :int = self.get_tokenizer()
_lowerCAmelCase :List[Any] = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase )
_lowerCAmelCase :Any = 'lower newer'
# Testing tokenization
_lowerCAmelCase :Dict = tokenizer.tokenize(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids without special tokens
_lowerCAmelCase :Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
_lowerCAmelCase :int = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids with special tokens
_lowerCAmelCase :List[Any] = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = tokenizer.encode(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
_lowerCAmelCase :Tuple = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing the unknown token
_lowerCAmelCase :Optional[int] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase :Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , *_UpperCAmelCase: Tuple , **_UpperCAmelCase: List[str] ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: int=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCAmelCase :Optional[int] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
# Simple input
_lowerCAmelCase :Any = 'This is a simple input'
_lowerCAmelCase :Union[str, Any] = ['This is a simple input 1', 'This is a simple input 2']
_lowerCAmelCase :Union[str, Any] = ('This is a simple input', 'This is a pair')
_lowerCAmelCase :Optional[int] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
_lowerCAmelCase :List[str] = 'This is a simple input'
_lowerCAmelCase :str = ['This is a simple input looooooooong', 'This is a simple input']
_lowerCAmelCase :Tuple = ('This is a simple input', 'This is a pair')
_lowerCAmelCase :Any = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
_lowerCAmelCase :Dict = tokenizer.pad_token_id
_lowerCAmelCase :List[Any] = tokenizer(_UpperCAmelCase , padding='max_length' , max_length=30 , return_tensors='np' )
_lowerCAmelCase :Union[str, Any] = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncate=_UpperCAmelCase , return_tensors='np' )
_lowerCAmelCase :str = tokenizer(*_UpperCAmelCase , padding='max_length' , max_length=60 , return_tensors='np' )
_lowerCAmelCase :Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncate=_UpperCAmelCase , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = '$$$'
_lowerCAmelCase :Union[str, Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=_UpperCAmelCase , add_bos_token=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = 'This is a simple input'
_lowerCAmelCase :Optional[Any] = ['This is a simple input 1', 'This is a simple input 2']
_lowerCAmelCase :Dict = tokenizer.bos_token_id
_lowerCAmelCase :Optional[int] = tokenizer(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = tokenizer(_UpperCAmelCase )
self.assertEqual(out_s.input_ids[0] , _UpperCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase :Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase :int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _UpperCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self: Any ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
_lowerCAmelCase :Tuple = [self.get_tokenizer(do_lower_case=_UpperCAmelCase , add_bos_token=_UpperCAmelCase )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
_lowerCAmelCase :Dict = 'Encode this.'
_lowerCAmelCase :Optional[int] = 'This one too please.'
_lowerCAmelCase :Union[str, Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
encoded_sequence += tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
_lowerCAmelCase :Dict = tokenizer.encode_plus(
_UpperCAmelCase , _UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , )
_lowerCAmelCase :List[Any] = encoded_sequence_dict['input_ids']
_lowerCAmelCase :List[str] = encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
_lowerCAmelCase :str = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_UpperCAmelCase )
]
_lowerCAmelCase :Union[str, Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
@require_tokenizers
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_lowerCAmelCase :Optional[int] = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = 'A photo of a cat'
_lowerCAmelCase :Tuple = tokenizer.encode(
_UpperCAmelCase , )
self.assertEqual(_UpperCAmelCase , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('test_opt' )
_lowerCAmelCase :Any = AutoTokenizer.from_pretrained('./test_opt' )
_lowerCAmelCase :Optional[int] = tokenizer.encode(
_UpperCAmelCase , )
self.assertEqual(_UpperCAmelCase , [2, 250, 1345, 9, 10, 4758] )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[Any] = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = 'A photo of a cat'
_lowerCAmelCase :int = tokenizer.encode(
_UpperCAmelCase , )
# Same as above
self.assertEqual(_UpperCAmelCase , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :List[str] = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = 'bos'
_lowerCAmelCase :int = tokenizer.get_vocab()['bos']
_lowerCAmelCase :int = 'A photo of a cat'
_lowerCAmelCase :List[Any] = tokenizer.encode(
_UpperCAmelCase , )
# We changed the bos token
self.assertEqual(_UpperCAmelCase , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('./tok' )
_lowerCAmelCase :Dict = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
_lowerCAmelCase :List[str] = tokenizer.encode(
_UpperCAmelCase , )
self.assertEqual(_UpperCAmelCase , [3_1957, 250, 1345, 9, 10, 4758] ) | 687 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self: str , _UpperCAmelCase: str , _UpperCAmelCase: Optional[int]=7 , _UpperCAmelCase: Union[str, Any]=3 , _UpperCAmelCase: int=18 , _UpperCAmelCase: List[Any]=30 , _UpperCAmelCase: List[Any]=400 , _UpperCAmelCase: Optional[Any]=True , _UpperCAmelCase: Any=None , _UpperCAmelCase: Any=True , _UpperCAmelCase: int=None , _UpperCAmelCase: Union[str, Any]=True , ):
_lowerCAmelCase :Tuple = size if size is not None else {'shortest_edge': 20}
_lowerCAmelCase :str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_lowerCAmelCase :str = parent
_lowerCAmelCase :List[Any] = batch_size
_lowerCAmelCase :Optional[Any] = num_channels
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :int = min_resolution
_lowerCAmelCase :List[str] = max_resolution
_lowerCAmelCase :List[str] = do_resize
_lowerCAmelCase :Optional[int] = size
_lowerCAmelCase :str = do_center_crop
_lowerCAmelCase :int = crop_size
_lowerCAmelCase :Optional[int] = do_flip_channel_order
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
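    # Hedged note: the processor resizes the shorter edge to
    # size['shortest_edge'] and then center-crops to crop_size, so every
    # encoded image in the tests below comes out (num_channels, 18, 18)
    # regardless of the random input resolution.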
@require_torch
@require_vision
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Any = MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self: str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_flip_channel_order' ) )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_lowerCAmelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self: int ):
# Initialize image_processing
_lowerCAmelCase :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_lowerCAmelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :str = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
# Initialize image_processing
_lowerCAmelCase :int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :List[str] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
# Initialize image_processing
_lowerCAmelCase :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :int = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , ) | 687 | 1 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[str] = RoCBertTokenizer
lowerCamelCase : Any = None
lowerCamelCase : int = False
lowerCamelCase : Dict = True
lowerCamelCase : Tuple = filter_non_english
def SCREAMING_SNAKE_CASE__ ( self: str ):
super().setUp()
_lowerCAmelCase :List[str] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
_lowerCAmelCase :List[str] = {}
_lowerCAmelCase :List[str] = {}
for i, value in enumerate(_UpperCAmelCase ):
_lowerCAmelCase :Optional[Any] = i
_lowerCAmelCase :Tuple = i
_lowerCAmelCase :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase :str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
_lowerCAmelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(_UpperCAmelCase , _UpperCAmelCase , ensure_ascii=_UpperCAmelCase )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(_UpperCAmelCase , _UpperCAmelCase , ensure_ascii=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :int = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_lowerCAmelCase :Optional[int] = tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(_UpperCAmelCase , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
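    # Hedged note: in this toy setup the shape and pronunciation vocabularies
    # are exact copies of the token vocabulary, which is why all three id
    # sequences above are identical.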
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :int = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Dict = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :str = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :List[Any] = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Dict = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Any = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Optional[int] = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Tuple = RoCBertBasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
_lowerCAmelCase :str = {}
for i, token in enumerate(_UpperCAmelCase ):
_lowerCAmelCase :Optional[Any] = i
_lowerCAmelCase :Any = RoCBertWordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def SCREAMING_SNAKE_CASE__ ( self: int ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :int = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_UpperCAmelCase ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
_lowerCAmelCase :Union[str, Any] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(_UpperCAmelCase ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCAmelCase :Optional[int] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :str = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
_lowerCAmelCase :Dict = tokenizer_r.encode_plus(
_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , )
_lowerCAmelCase :Optional[int] = tokenizer_r.do_lower_case if hasattr(_UpperCAmelCase , 'do_lower_case' ) else False
_lowerCAmelCase :Any = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :List[Any] = ['的', '人', '有']
_lowerCAmelCase :Optional[int] = ''.join(_UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCAmelCase :Dict = True
_lowerCAmelCase :Optional[Any] = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = tokenizer_p.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
_lowerCAmelCase :str = tokenizer_r.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = tokenizer_r.convert_ids_to_tokens(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = tokenizer_p.convert_ids_to_tokens(_UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[int] = False
_lowerCAmelCase :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Any = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = tokenizer_r.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = tokenizer_p.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
_lowerCAmelCase :Dict = tokenizer_r.convert_ids_to_tokens(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(_UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
_lowerCAmelCase :Optional[Any] = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(_UpperCAmelCase )
]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_lowerCAmelCase :Optional[Any] = tokenizer.encode('你好' , add_special_tokens=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = tokenizer.encode('你是谁' , add_special_tokens=_UpperCAmelCase )
_lowerCAmelCase :Tuple = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :int = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
_lowerCAmelCase :int = '你好,你是谁'
_lowerCAmelCase :Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase )
_lowerCAmelCase :Any = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = tokenizer.convert_tokens_to_shape_ids(_UpperCAmelCase )
_lowerCAmelCase :Dict = tokenizer.convert_tokens_to_pronunciation_ids(_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = tokenizer.prepare_for_model(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
_lowerCAmelCase :str = tokenizer.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) | 687 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
lowerCamelCase : Optional[datasets.Features] = None
class UpperCAmelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCamelCase : Any = PandasConfig
def SCREAMING_SNAKE_CASE__ ( self: int ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: List[str] ):
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowerCAmelCase :Dict = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_UpperCAmelCase , (str, list, tuple) ):
_lowerCAmelCase :Any = data_files
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :List[Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
_lowerCAmelCase :Any = []
for split_name, files in data_files.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :str = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :Union[str, Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={'files': files} ) )
return splits
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: pa.Table ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase :str = table_cast(_UpperCAmelCase , self.config.features.arrow_schema )
return pa_table
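    # Hedged usage note: this builder is normally reached through
    # load_dataset('pandas', data_files=...), which unpickles each DataFrame
    # shard and converts it to an Arrow table via the generator below.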
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: Dict ):
for i, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase ) ):
with open(_UpperCAmelCase , 'rb' ) as f:
_lowerCAmelCase :Optional[Any] = pa.Table.from_pandas(pd.read_pickle(_UpperCAmelCase ) )
yield i, self._cast_table(_UpperCAmelCase ) | 687 | 1 |
def UpperCamelCase_( __magic_name__ : Dict , __magic_name__ : Optional[Any] ):
"""simple docstring"""
print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
for i in range(__magic_name__ ):
for j in range(__magic_name__ ):
if dist[i][j] != float('inf' ):
print(int(dist[i][j] ) , end='\t' )
else:
print('INF' , end='\t' )
print()
def UpperCamelCase_( __magic_name__ : Dict , __magic_name__ : Tuple ):
"""simple docstring"""
_lowerCAmelCase :Dict = [[float('inf' ) for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
for i in range(__magic_name__ ):
for j in range(__magic_name__ ):
_lowerCAmelCase :List[str] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__magic_name__ ):
# looping through rows of graph array
for i in range(__magic_name__ ):
# looping through columns of graph array
for j in range(__magic_name__ ):
if (
dist[i][k] != float('inf' )
and dist[k][j] != float('inf' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
_lowerCAmelCase :List[Any] = dist[i][k] + dist[k][j]
_print_dist(__magic_name__ , __magic_name__ )
return dist, v
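# Hedged walk-through (mirrors the sample session at the bottom of this file):
# for v = 3 with edges 1 -> 2 (weight 2) and 2 -> 1 (weight 1), vertex 0 stays
# unreachable (INF) while dist[1][2] = 2 and dist[2][1] = 1 survive relaxation.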
if __name__ == "__main__":
a = int(input("""Enter number of vertices: """))
a = int(input("""Enter number of edges: """))
a = [[float("""inf""") for i in range(v)] for j in range(v)]
for i in range(v):
a = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("""\nEdge """, i + 1)
a = int(input("""Enter source:"""))
a = int(input("""Enter destination:"""))
a = float(input("""Enter weight:"""))
a = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0 | 687 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
a = """"""
a = """"""
a = """"""
a = 1 # (0 is vertical, 1 is horizontal)
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = get_dataset(__magic_name__ , __magic_name__ )
print('Processing...' )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = update_image_and_anno(__magic_name__ , __magic_name__ , __magic_name__ )
for index, image in enumerate(__magic_name__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase :Optional[Any] = random_chars(32 )
_lowerCAmelCase :str = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
_lowerCAmelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , __magic_name__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(__magic_name__ )} with {file_name}""" )
_lowerCAmelCase :str = []
for anno in new_annos[index]:
_lowerCAmelCase :List[str] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(__magic_name__ )
with open(f"""/{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :int = []
_lowerCAmelCase :Union[str, Any] = []
for label_file in glob.glob(os.path.join(__magic_name__ , '*.txt' ) ):
_lowerCAmelCase :Optional[int] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(__magic_name__ ) as in_file:
_lowerCAmelCase :Union[str, Any] = in_file.readlines()
_lowerCAmelCase :List[Any] = os.path.join(__magic_name__ , f"""{label_name}.jpg""" )
_lowerCAmelCase :Tuple = []
for obj_list in obj_lists:
_lowerCAmelCase :Union[str, Any] = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__magic_name__ )
labels.append(__magic_name__ )
return img_paths, labels
def UpperCamelCase_( __magic_name__ : list , __magic_name__ : list , __magic_name__ : int = 1 ):
"""simple docstring"""
_lowerCAmelCase :str = []
_lowerCAmelCase :Any = []
_lowerCAmelCase :Optional[Any] = []
for idx in range(len(__magic_name__ ) ):
_lowerCAmelCase :Optional[int] = []
_lowerCAmelCase :Optional[Any] = img_list[idx]
path_list.append(__magic_name__ )
_lowerCAmelCase :List[str] = anno_list[idx]
_lowerCAmelCase :Optional[Any] = cva.imread(__magic_name__ )
if flip_type == 1:
_lowerCAmelCase :List[Any] = cva.flip(__magic_name__ , __magic_name__ )
for bbox in img_annos:
_lowerCAmelCase :List[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_lowerCAmelCase :List[str] = cva.flip(__magic_name__ , __magic_name__ )
for bbox in img_annos:
_lowerCAmelCase :List[str] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__magic_name__ )
new_imgs_list.append(__magic_name__ )
return new_imgs_list, new_annos_lists, path_list
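# Hedged note: annotations are assumed to be YOLO-style rows
# (class, x_center, y_center, width, height) normalized to [0, 1], so a
# horizontal flip only needs x_center -> 1 - x_center and a vertical flip
# y_center -> 1 - y_center; widths and heights are untouched.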
def UpperCamelCase_( __magic_name__ : int = 32 ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
_lowerCAmelCase :str = ascii_lowercase + digits
return "".join(random.choice(__magic_name__ ) for _ in range(__magic_name__ ) )
if __name__ == "__main__":
main()
print("""DONE ✅""") | 687 | 1 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Tuple = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , **_UpperCAmelCase: Union[str, Any] ):
_lowerCAmelCase :str = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**_UpperCAmelCase )
return config
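    # Hedged usage: the checks below override single fields through kwargs,
    # e.g. self.get_scheduler_config(prediction_type='v_prediction') swaps only
    # the prediction target while keeping the linear beta schedule.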
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
self.check_over_configs(thresholding=_UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCAmelCase , prediction_type=_UpperCAmelCase , sample_max_value=_UpperCAmelCase , )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Dict = self.scheduler_classes[0]
_lowerCAmelCase :str = self.get_scheduler_config()
_lowerCAmelCase :Dict = scheduler_class(**_UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
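        # Hedged note: with the default 'fixed_small' variance this is
        # beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t), which vanishes at
        # t = 0 and approaches beta_end = 0.02 at the final timestep.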
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :List[str] = self.scheduler_classes[0]
_lowerCAmelCase :Dict = self.get_scheduler_config()
_lowerCAmelCase :Tuple = scheduler_class(**_UpperCAmelCase )
_lowerCAmelCase :List[Any] = len(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = self.dummy_model()
_lowerCAmelCase :int = self.dummy_sample_deter
_lowerCAmelCase :Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
_lowerCAmelCase :List[str] = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase :int = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase :str = pred_prev_sample
_lowerCAmelCase :int = torch.sum(torch.abs(_UpperCAmelCase ) )
_lowerCAmelCase :Dict = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Optional[int] = self.scheduler_classes[0]
_lowerCAmelCase :int = self.get_scheduler_config(prediction_type='v_prediction' )
_lowerCAmelCase :str = scheduler_class(**_UpperCAmelCase )
_lowerCAmelCase :List[str] = len(_UpperCAmelCase )
_lowerCAmelCase :str = self.dummy_model()
_lowerCAmelCase :Union[str, Any] = self.dummy_sample_deter
_lowerCAmelCase :List[str] = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
_lowerCAmelCase :List[str] = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase :Optional[Any] = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase :str = pred_prev_sample
_lowerCAmelCase :Optional[Any] = torch.sum(torch.abs(_UpperCAmelCase ) )
_lowerCAmelCase :Any = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase :Union[str, Any] = self.get_scheduler_config()
_lowerCAmelCase :Union[str, Any] = scheduler_class(**_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
_lowerCAmelCase :str = scheduler.timesteps
for i, timestep in enumerate(_UpperCAmelCase ):
if i == len(_UpperCAmelCase ) - 1:
_lowerCAmelCase :List[str] = -1
else:
_lowerCAmelCase :Optional[int] = timesteps[i + 1]
_lowerCAmelCase :Dict = scheduler.previous_timestep(_UpperCAmelCase )
_lowerCAmelCase :List[str] = prev_t.item()
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = self.scheduler_classes[0]
_lowerCAmelCase :Optional[int] = self.get_scheduler_config()
_lowerCAmelCase :List[str] = scheduler_class(**_UpperCAmelCase )
_lowerCAmelCase :List[str] = [100, 87, 50, 51, 0]
with self.assertRaises(_UpperCAmelCase , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :List[str] = self.scheduler_classes[0]
_lowerCAmelCase :Optional[Any] = self.get_scheduler_config()
_lowerCAmelCase :Optional[Any] = scheduler_class(**_UpperCAmelCase )
_lowerCAmelCase :int = [100, 87, 50, 1, 0]
_lowerCAmelCase :str = len(_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :List[str] = self.scheduler_classes[0]
_lowerCAmelCase :Optional[int] = self.get_scheduler_config()
_lowerCAmelCase :int = scheduler_class(**_UpperCAmelCase )
_lowerCAmelCase :str = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            _UpperCAmelCase , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase ) | 687 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
a = logging.get_logger(__name__)
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = nn.functional.normalize(__magic_name__ )
_lowerCAmelCase :List[str] = nn.functional.normalize(__magic_name__ )
return torch.mm(__magic_name__ , normalized_text_embeds.t() )
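# Hedged note: both inputs are L2-normalized first, so the matrix product above
# yields pairwise cosine similarities in [-1, 1] between every image embedding
# and every concept embedding (shape: [batch_size, num_concepts]).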
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : str = CLIPConfig
lowerCamelCase : Any = ['CLIPEncoderLayer']
def __init__( self: Optional[int] , _UpperCAmelCase: CLIPConfig ):
super().__init__(_UpperCAmelCase )
_lowerCAmelCase :Any = CLIPVisionModel(config.vision_config )
_lowerCAmelCase :Optional[int] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase )
_lowerCAmelCase :int = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :Any = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :str = nn.Parameter(torch.ones(17 ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = nn.Parameter(torch.ones(3 ) , requires_grad=_UpperCAmelCase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Dict ):
_lowerCAmelCase :str = self.vision_model(_UpperCAmelCase )[1] # pooled_output
_lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase :Optional[int] = cosine_distance(_UpperCAmelCase , self.special_care_embeds ).cpu().float().numpy()
_lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds ).cpu().float().numpy()
_lowerCAmelCase :str = []
_lowerCAmelCase :List[Any] = image_embeds.shape[0]
for i in range(_UpperCAmelCase ):
_lowerCAmelCase :Optional[Any] = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase :List[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
_lowerCAmelCase :List[Any] = special_cos_dist[i][concept_idx]
_lowerCAmelCase :Dict = self.special_care_embeds_weights[concept_idx].item()
_lowerCAmelCase :List[Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} )
_lowerCAmelCase :Any = 0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
_lowerCAmelCase :Union[str, Any] = cos_dist[i][concept_idx]
_lowerCAmelCase :str = self.concept_embeds_weights[concept_idx].item()
_lowerCAmelCase :str = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_UpperCAmelCase )
result.append(_UpperCAmelCase )
_lowerCAmelCase :Any = [len(res['bad_concepts'] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: torch.FloatTensor , _UpperCAmelCase: torch.FloatTensor ):
_lowerCAmelCase :Optional[int] = self.vision_model(_UpperCAmelCase )[1] # pooled_output
_lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase )
_lowerCAmelCase :Dict = cosine_distance(_UpperCAmelCase , self.special_care_embeds )
_lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase :Any = 0.0
_lowerCAmelCase :Union[str, Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
_lowerCAmelCase :Tuple = torch.any(special_scores > 0 , dim=1 )
_lowerCAmelCase :List[str] = special_care * 0.0_1
_lowerCAmelCase :Any = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
_lowerCAmelCase :Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
_lowerCAmelCase :List[str] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts | 687 | 1 |
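# --- Editor's added sketch (illustration, not part of the row above) ---
# The vectorized branch of the safety checker reduces to thresholded cosine
# scores plus a small penalty once any "special care" concept fires. The toy
# shapes and 0.5 weights below are assumptions, not the real CLIP values:
import torch

cos_dist = torch.rand(2, 3)            # (batch, num_concepts)
special_cos_dist = torch.rand(2, 17)   # (batch, num_special_concepts)
concept_weights = torch.full((3,), 0.5)
special_weights = torch.full((17,), 0.5)

special_scores = special_cos_dist - special_weights           # adjustment = 0.0
special_care = torch.any(special_scores > 0, dim=1)
special_adjustment = (special_care * 0.01).unsqueeze(1).expand(-1, cos_dist.shape[1])
concept_scores = (cos_dist - concept_weights) + special_adjustment
print(torch.any(concept_scores > 0, dim=1))                   # per-image nsfw flags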
def binomial_coefficient( n: int , r: int ):
    """Compute C(n, r) with Pascal's rule, keeping only one row of the triangle."""
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5)) | 687 |
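# --- Editor's added sanity check ---
# The row-by-row Pascal's-rule implementation above should agree with the
# closed form in the standard library:
from math import comb

assert binomial_coefficient(n=10, r=5) == comb(10, 5) == 252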
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a = 6_3_7_8_1_3_7.0
a = 6_3_5_6_7_5_2.3_1_4_2_4_5
a = 6_378_137
def lamberts_ellipsoidal_distance( lat1: float , lon1: float , lat2: float , lon2: float ):
    """Lambert's formula for the ellipsoidal distance, in meters, between two points."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 | 1 |
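# --- Editor's added usage sketch ---
# Rough illustration with rounded coordinates for San Francisco and New York;
# the return value is in meters because EQUATORIAL_RADIUS is in meters.
SAN_FRANCISCO = (37.774856, -122.424227)
NEW_YORK = (40.713019, -74.012647)
print(lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *NEW_YORK))  # roughly 4.1e6 m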
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ):
_lowerCAmelCase :Optional[int] = parent
_lowerCAmelCase :Dict = batch_size
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :Optional[Any] = patch_size
_lowerCAmelCase :List[Any] = num_channels
_lowerCAmelCase :Optional[int] = embed_dim
_lowerCAmelCase :List[str] = hidden_sizes
_lowerCAmelCase :Union[str, Any] = depths
_lowerCAmelCase :int = num_heads
_lowerCAmelCase :Any = window_size
_lowerCAmelCase :List[Any] = mlp_ratio
_lowerCAmelCase :Optional[int] = qkv_bias
_lowerCAmelCase :Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase :Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase :Dict = drop_path_rate
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :Tuple = use_absolute_embeddings
_lowerCAmelCase :Optional[int] = patch_norm
_lowerCAmelCase :Optional[Any] = layer_norm_eps
_lowerCAmelCase :Union[str, Any] = initializer_range
_lowerCAmelCase :List[str] = is_training
_lowerCAmelCase :str = scope
_lowerCAmelCase :Optional[int] = use_labels
_lowerCAmelCase :List[Any] = type_sequence_label_size
_lowerCAmelCase :Union[str, Any] = encoder_stride
_lowerCAmelCase :Optional[int] = out_features
_lowerCAmelCase :List[str] = out_indices
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase :Dict = None
if self.use_labels:
_lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase :str = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self: int ):
return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ):
_lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCAmelCase :Optional[int] = None
_lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase :List[Any] = 1
_lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :int = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size
_lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase :Optional[int] = 1
_lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs
_lowerCAmelCase :List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : Optional[Any] = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Any = False
lowerCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = FocalNetModelTester(self )
_lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def SCREAMING_SNAKE_CASE__ ( self: str ):
pass
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Tuple = model_class(_UpperCAmelCase )
_lowerCAmelCase :Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase :int = [*signature.parameters.keys()]
_lowerCAmelCase :List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_lowerCAmelCase :List[Any] = outputs.hidden_states
_lowerCAmelCase :str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# FocalNet has a different seq_length
_lowerCAmelCase :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase :List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape
_lowerCAmelCase :Optional[int] = (
reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Dict = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :str = 3
_lowerCAmelCase :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase :int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :List[str] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Union[str, Any] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase :str = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.default_image_processor
_lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase :Dict = model(**_UpperCAmelCase )
# verify the logits
_lowerCAmelCase :str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase : str = FocalNetConfig
lowerCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Any = FocalNetModelTester(self ) | 687 |
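# --- Editor's added worked example ---
# The shape assertions in create_and_check_model above are plain arithmetic.
# With the tester defaults (image_size=32, patch_size=2, embed_dim=16,
# depths=[1, 2, 1]):
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))
print(expected_seq_len, expected_dim)  # 16 64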
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Dict = 'encoder-decoder'
lowerCamelCase : Optional[Any] = True
    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('encoder' )
        encoder_model_type = encoder_config.pop('model_type' )
        decoder_config = kwargs.pop('decoder' )
        decoder_model_type = decoder_config.pop('model_type' )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs ):
        logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output | 687 | 1 |
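# --- Editor's added usage sketch ---
# Composing an encoder-decoder config from two sub-configs; assumes
# `transformers` is installed, and the tiny hyperparameters are illustrative.
from transformers import BertConfig, EncoderDecoderConfig

encoder = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
decoder = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
assert config.decoder.is_decoder and config.decoder.add_cross_attention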
import itertools
import string
from collections.abc import Generator, Iterable
def chunker( seq: Iterable[str] , size: int ):
    """Yield successive `size`-length tuples from `seq`."""
    it = iter(seq )
    while True:
        chunk = tuple(itertools.islice(it , size ) )
        if not chunk:
            return
        yield chunk
def prepare_input( dirty: str ):
    """Upper-case the message, drop non-letters, and X-pad repeated letters."""
    dirty = ''.join([c.upper() for c in dirty if c in string.ascii_letters] )
    clean = ''
    if len(dirty ) < 2:
        return dirty
    for i in range(len(dirty ) - 1 ):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean ) & 1:
        clean += "X"
    return clean
def generate_table( key: str ):
    """Build the 5x5 Playfair key square (I/J merged) as a flat 25-char list."""
    alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char )
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char )
    return table
def encode( plaintext: str , key: str ):
    """Encrypt `plaintext` with the Playfair cipher under `key`."""
    table = generate_table(key )
    plaintext = prepare_input(plaintext )
    ciphertext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(plaintext , 2 ):
        rowa , cola = divmod(table.index(chara ) , 5 )
        rowb , colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext
def decode( ciphertext: str , key: str ):
    """Decrypt Playfair `ciphertext` under `key`."""
    table = generate_table(key )
    plaintext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext , 2 ):
        rowa , cola = divmod(table.index(chara ) , 5 )
        rowb , colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext | 687 |
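# --- Editor's added round-trip check ---
# Decoding an encoded message returns the *prepared* plaintext (upper-cased,
# with X padding), not the original string. Key and message are illustrative.
message, key = "Hide the gold in the tree stump", "playfair example"
assert decode(encode(message, key), key) == prepare_input(message)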
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ):
_lowerCAmelCase :Optional[int] = parent
_lowerCAmelCase :Dict = batch_size
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :Optional[Any] = patch_size
_lowerCAmelCase :List[Any] = num_channels
_lowerCAmelCase :Optional[int] = embed_dim
_lowerCAmelCase :List[str] = hidden_sizes
_lowerCAmelCase :Union[str, Any] = depths
_lowerCAmelCase :int = num_heads
_lowerCAmelCase :Any = window_size
_lowerCAmelCase :List[Any] = mlp_ratio
_lowerCAmelCase :Optional[int] = qkv_bias
_lowerCAmelCase :Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase :Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase :Dict = drop_path_rate
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :Tuple = use_absolute_embeddings
_lowerCAmelCase :Optional[int] = patch_norm
_lowerCAmelCase :Optional[Any] = layer_norm_eps
_lowerCAmelCase :Union[str, Any] = initializer_range
_lowerCAmelCase :List[str] = is_training
_lowerCAmelCase :str = scope
_lowerCAmelCase :Optional[int] = use_labels
_lowerCAmelCase :List[Any] = type_sequence_label_size
_lowerCAmelCase :Union[str, Any] = encoder_stride
_lowerCAmelCase :Optional[int] = out_features
_lowerCAmelCase :List[str] = out_indices
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase :Dict = None
if self.use_labels:
_lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase :str = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self: int ):
return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ):
_lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCAmelCase :Optional[int] = None
_lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase :List[Any] = 1
_lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :int = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size
_lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase :Optional[int] = 1
_lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs
_lowerCAmelCase :List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : Optional[Any] = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Any = False
lowerCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = FocalNetModelTester(self )
_lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def SCREAMING_SNAKE_CASE__ ( self: str ):
pass
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Tuple = model_class(_UpperCAmelCase )
_lowerCAmelCase :Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase :int = [*signature.parameters.keys()]
_lowerCAmelCase :List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_lowerCAmelCase :List[Any] = outputs.hidden_states
_lowerCAmelCase :str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# FocalNet has a different seq_length
_lowerCAmelCase :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase :List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape
_lowerCAmelCase :Optional[int] = (
reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Dict = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :str = 3
_lowerCAmelCase :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase :int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :List[str] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Union[str, Any] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase :str = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.default_image_processor
_lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase :Dict = model(**_UpperCAmelCase )
# verify the logits
_lowerCAmelCase :str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase : str = FocalNetConfig
lowerCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Any = FocalNetModelTester(self ) | 687 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : int = 'bert'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
@property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] ) | 687 |
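# --- Editor's added sketch ---
# What the ONNX `inputs` property above yields for the default task; a plain
# OrderedDict illustration with no transformers dependency:
from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = OrderedDict(
    [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
)
print(list(onnx_inputs))  # ['input_ids', 'attention_mask', 'token_type_ids']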
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
a = HfApi()
a = {}
# fmt: off
a = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
a = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
a = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
a = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
a = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
a = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
a = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
a = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
a = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
a = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
a = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
a = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
a = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
a = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
a = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
a = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
a = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith("""CompVis"""):
a = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
a = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
a = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
a = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
a = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''') | 687 | 1 |
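# --- Editor's added sketch ---
# The checkpoint sweep above only works because seeded noise is reproducible,
# so the hard-coded 30-value reference slices stay comparable across runs:
import torch

torch.manual_seed(0)
first = torch.randn(1, 3, 32, 32)
torch.manual_seed(0)
second = torch.randn(1, 3, 32, 32)
assert torch.allclose(first, second)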
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : int = 'vivit'
def __init__( self: List[Any] , _UpperCAmelCase: Optional[int]=224 , _UpperCAmelCase: int=32 , _UpperCAmelCase: List[Any]=[2, 16, 16] , _UpperCAmelCase: Dict=3 , _UpperCAmelCase: Optional[int]=768 , _UpperCAmelCase: Any=12 , _UpperCAmelCase: Any=12 , _UpperCAmelCase: List[Any]=3072 , _UpperCAmelCase: List[Any]="gelu_fast" , _UpperCAmelCase: List[Any]=0.0 , _UpperCAmelCase: Optional[Any]=0.0 , _UpperCAmelCase: int=0.0_2 , _UpperCAmelCase: Optional[Any]=1e-0_6 , _UpperCAmelCase: Tuple=True , **_UpperCAmelCase: List[Any] , ):
_lowerCAmelCase :List[Any] = hidden_size
_lowerCAmelCase :Optional[Any] = num_hidden_layers
_lowerCAmelCase :Union[str, Any] = num_attention_heads
_lowerCAmelCase :str = intermediate_size
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :int = hidden_dropout_prob
_lowerCAmelCase :Tuple = attention_probs_dropout_prob
_lowerCAmelCase :Tuple = initializer_range
_lowerCAmelCase :List[Any] = layer_norm_eps
_lowerCAmelCase :List[Any] = image_size
_lowerCAmelCase :str = num_frames
_lowerCAmelCase :Union[str, Any] = tubelet_size
_lowerCAmelCase :int = num_channels
_lowerCAmelCase :Dict = qkv_bias
super().__init__(**_UpperCAmelCase ) | 687 |
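# Hedged usage sketch (mine): the class above mirrors `transformers.VivitConfig`;
# with a recent `transformers` release the same defaults are exercised like this:
from transformers import VivitConfig

cfg = VivitConfig(image_size=224, num_frames=32, tubelet_size=[2, 16, 16])
print(cfg.hidden_size)  # 768, matching the default in the signature above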
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Optional[int] = 10
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :str = [1, 2, 3, 4]
_lowerCAmelCase :Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_lowerCAmelCase :Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
_lowerCAmelCase , _lowerCAmelCase :Optional[Any] = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Optional[int] = ''
_lowerCAmelCase , _lowerCAmelCase :str = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Optional[Any] = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
_lowerCAmelCase , _lowerCAmelCase :Optional[int] = process_story(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[int] = ['It was the best of times.']
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Union[str, Any] = torch.tensor([1, 2, 3, 4] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 0 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_lowerCAmelCase :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 23 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 1 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :List[str] = 101
_lowerCAmelCase :Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_lowerCAmelCase :int = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_lowerCAmelCase :List[str] = compute_token_type_ids(_UpperCAmelCase , _UpperCAmelCase )
np.testing.assert_array_equal(_UpperCAmelCase , _UpperCAmelCase ) | 687 | 1 |
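# Hedged sketch (mine, not the module under test): behaviour implied by the three
# truncate_or_pad tests above -- truncate past block_size, pad short inputs with the
# pad token, and pass exact-length inputs through unchanged.
def truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

assert truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]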
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : str = VideoToVideoSDPipeline
lowerCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'} ) - {'image', 'width', 'height'}
lowerCamelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'} ) - {'image'}
lowerCamelCase : int = PipelineTesterMixin.required_optional_params - {'latents'}
lowerCamelCase : Any = False
# No `output_type`.
lowerCamelCase : str = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
torch.manual_seed(0 )
_lowerCAmelCase :str = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
_lowerCAmelCase :Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
torch.manual_seed(0 )
_lowerCAmelCase :int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_lowerCAmelCase :Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
_lowerCAmelCase :int = CLIPTextModel(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase :str = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: List[Any]=0 ):
# 3 frames
_lowerCAmelCase :int = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
if str(_UpperCAmelCase ).startswith('mps' ):
_lowerCAmelCase :List[str] = torch.manual_seed(_UpperCAmelCase )
else:
_lowerCAmelCase :Tuple = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_lowerCAmelCase :List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase :Optional[Any] = self.get_dummy_components()
_lowerCAmelCase :Union[str, Any] = VideoToVideoSDPipeline(**_UpperCAmelCase )
_lowerCAmelCase :int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_lowerCAmelCase :Any = self.get_dummy_inputs(_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = 'np'
_lowerCAmelCase :Optional[int] = sd_pipe(**_UpperCAmelCase ).frames
_lowerCAmelCase :str = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
_lowerCAmelCase :int = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_UpperCAmelCase , expected_max_diff=5e-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self: Any ):
return super().test_progress_bar()
@slow
@skip_mps
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Optional[Any] = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
_lowerCAmelCase :List[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase :Tuple = torch.randn((1, 10, 3, 1024, 576) , generator=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = video.to('cuda' )
_lowerCAmelCase :List[Any] = 'Spiderman is surfing'
_lowerCAmelCase :Any = pipe(_UpperCAmelCase , video=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=3 , output_type='pt' ).frames
_lowerCAmelCase :Dict = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2 | 687 |
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
a = int(input("""Enter number: """).strip())
print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''') | 687 | 1 |
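# Worked example (mine): 28 is perfect because its proper divisors 1 + 2 + 4 + 7 + 14
# sum back to 28, while 27's proper divisors 1 + 3 + 9 only sum to 13.
assert sum(i for i in range(1, 28 // 2 + 1) if 28 % i == 0) == 28
assert sum(i for i in range(1, 27 // 2 + 1) if 27 % i == 0) != 27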
import os
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :str = os.path.join(os.path.dirname(__magic_name__ ) , 'num.txt' )
with open(__magic_name__ ) as file_hand:
return str(sum(int(__magic_name__ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution()) | 687 |
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: List[Any] , _UpperCAmelCase: int , _UpperCAmelCase: MutableSequence[float] ):
if len(_UpperCAmelCase ) != degree + 1:
raise ValueError(
'The number of coefficients should be equal to the degree + 1.' )
_lowerCAmelCase :list[float] = list(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = degree
def __add__( self: str , _UpperCAmelCase: Polynomial ):
if self.degree > polynomial_a.degree:
_lowerCAmelCase :Any = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , _UpperCAmelCase )
else:
_lowerCAmelCase :List[Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , _UpperCAmelCase )
def __sub__( self: str , _UpperCAmelCase: Polynomial ):
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self: Union[str, Any] ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self: int , _UpperCAmelCase: Polynomial ):
_lowerCAmelCase :list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: int | float ):
_lowerCAmelCase :int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self: Union[str, Any] ):
_lowerCAmelCase :Dict = ''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_UpperCAmelCase )
return polynomial
def __repr__( self: Optional[Any] ):
return self.__str__()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :list[float] = [0] * self.degree
for i in range(self.degree ):
_lowerCAmelCase :Tuple = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int | float = 0 ):
_lowerCAmelCase :list[float] = [0] * (self.degree + 2)
_lowerCAmelCase :str = constant
for i in range(self.degree + 1 ):
_lowerCAmelCase :List[str] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , _UpperCAmelCase )
def __eq__( self: List[Any] , _UpperCAmelCase: object ):
        if not isinstance(_UpperCAmelCase , UpperCAmelCase_ ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self: Optional[Any] , _UpperCAmelCase: object ):
return not self.__eq__(_UpperCAmelCase ) | 687 | 1 |
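# Hedged usage sketch (mine): with the duplicated `_UpperCAmelCase` parameters in
# __init__ restored to distinct names (degree, coefficients), the class above acts as
# a dense polynomial whose coefficients are stored lowest power first:
#
#   p = UpperCAmelCase_(2, [1, 2, 3])   # 1 + 2x + 3x^2
#   q = UpperCAmelCase_(1, [5, 4])      # 5 + 4x
#   print(p + q)                        # 3x^2 + 6x + 6
#   print(p * q)                        # 12x^3 + 23x^2 + 14x + 5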
def UpperCamelCase_( __magic_name__ : int | float | str ):
"""simple docstring"""
try:
_lowerCAmelCase :List[str] = float(__magic_name__ )
except ValueError:
raise ValueError('Please enter a valid number' )
_lowerCAmelCase :str = decimal - int(__magic_name__ )
if fractional_part == 0:
return int(__magic_name__ ), 1
else:
_lowerCAmelCase :List[str] = len(str(__magic_name__ ).split('.' )[1] )
_lowerCAmelCase :List[str] = int(decimal * (10**number_of_frac_digits) )
_lowerCAmelCase :List[str] = 10**number_of_frac_digits
_lowerCAmelCase , _lowerCAmelCase :List[str] = denominator, numerator
while True:
_lowerCAmelCase :int = dividend % divisor
if remainder == 0:
break
_lowerCAmelCase , _lowerCAmelCase :int = divisor, remainder
_lowerCAmelCase , _lowerCAmelCase :Any = numerator / divisor, denominator / divisor
return int(__magic_name__ ), int(__magic_name__ )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(8_9.0) = }''')
print(F'''{decimal_to_fraction('67') = }''')
print(F'''{decimal_to_fraction('45.0') = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction('6.25') = }''')
print(F'''{decimal_to_fraction('78td') = }''') | 687 |
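# Worked example (mine): 0.25 has two fractional digits, so the loop above starts
# from 25/100; Euclid's algorithm finds gcd(100, 25) = 25 and reduces to (1, 4).
from math import gcd

assert gcd(100, 25) == 25 and (25 // 25, 100 // 25) == (1, 4)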
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 | 1 |
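# Hedged sketch (mine) of what the _LazyModule pattern above buys: attributes are
# resolved through __getattr__ on first access, so heavy optional dependencies are
# only imported when actually used.
import importlib

class LazyModuleSketch:
    def __init__(self, name: str):
        self._name = name
        self._module = None

    def __getattr__(self, attr: str):
        if self._module is None:  # real import deferred until first attribute access
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

lazy_math = LazyModuleSketch("math")
print(lazy_math.sqrt(9.0))  # 3.0 -- "math" is imported only here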
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
    if not isinstance(__magic_name__ , int ):
raise TypeError('only integers accepted as input' )
else:
_lowerCAmelCase :str = str(abs(__magic_name__ ) )
        _lowerCAmelCase :Optional[int] = [list(num_str ) for char in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
    return max(
        int(''.join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod() | 687 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : float | Decimal , __magic_name__ : float = 10**-10 ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = a
while True:
        _lowerCAmelCase :str = Decimal(x ) - (
Decimal(eval(__magic_name__ ) ) / Decimal(eval(str(diff(__magic_name__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(__magic_name__ ) ) < precision: # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
# Find Square Root of 5
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''') | 687 | 1 |
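# Worked example (mine): one Newton step for f(x) = x**2 - 5 starting at x0 = 2 gives
# x1 = 2 - (2**2 - 5) / (2 * 2) = 2.25, already within 0.1 of sqrt(5) ≈ 2.2360679.
assert abs((2 - (2**2 - 5) / (2 * 2)) - 5**0.5) < 0.1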
def UpperCamelCase_( __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :List[str] = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
_lowerCAmelCase :Any = ''
_lowerCAmelCase :Dict = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__magic_name__ ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
_lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = 0, 0
# length[i] shows the length of palindromic substring with center i
    _lowerCAmelCase :Tuple = [1 for i in range(len(new_input_string ) )]
# for each character in new_string find corresponding palindromic string
_lowerCAmelCase :Any = 0
    for j in range(len(new_input_string ) ):
_lowerCAmelCase :str = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
            and j + k < len(new_input_string )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_lowerCAmelCase :int = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
_lowerCAmelCase :str = j - k + 1 # noqa: E741
_lowerCAmelCase :Optional[int] = j + k - 1
# update max_length and start position
if max_length < length[j]:
_lowerCAmelCase :List[Any] = length[j]
_lowerCAmelCase :Optional[Any] = j
# create that string
_lowerCAmelCase :Tuple = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 |
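# Hedged cross-check (mine): a tiny O(n^3) brute-force oracle for the Manacher
# routine above -- the longest palindromic substring of "forgeeksskeegfor" is
# "geeksskeeg".
def brute_force_longest_palindrome(s: str) -> str:
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            piece = s[i : j + 1]
            if piece == piece[::-1] and len(piece) > len(best):
                best = piece
    return best

assert brute_force_longest_palindrome("forgeeksskeegfor") == "geeksskeeg"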
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
a = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
def UpperCamelCase_( __magic_name__ : Dict ):
"""simple docstring"""
    if isinstance(__magic_name__ , bool ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any]=False ):
"""simple docstring"""
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
_lowerCAmelCase :Union[str, Any] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
_lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
_lowerCAmelCase :Optional[int] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : List[str]=None ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Tuple = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.norm.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.norm.bias"""]
_lowerCAmelCase :Dict = weight_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :str = bias_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[str] = weight_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Optional[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Tuple = weight_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[Any] = bias_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :int = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = torch.load(__magic_name__ , map_location='cpu' )
_lowerCAmelCase :List[Any] = {}
_lowerCAmelCase :List[str] = checkpoint['time_embed.0.weight']
_lowerCAmelCase :Tuple = checkpoint['time_embed.0.bias']
_lowerCAmelCase :Dict = checkpoint['time_embed.2.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_lowerCAmelCase :Union[str, Any] = checkpoint['label_emb.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.bias']
_lowerCAmelCase :List[Any] = unet_config['down_block_types']
_lowerCAmelCase :Any = unet_config['layers_per_block']
_lowerCAmelCase :List[Any] = unet_config['attention_head_dim']
_lowerCAmelCase :Tuple = unet_config['block_out_channels']
_lowerCAmelCase :List[str] = 1
_lowerCAmelCase :Optional[int] = channels_list[0]
for i, layer_type in enumerate(__magic_name__ ):
_lowerCAmelCase :Tuple = channels_list[i]
_lowerCAmelCase :Optional[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :int = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[Any] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :int = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase :List[Any] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :List[str] = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Optional[int] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :List[str] = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :Optional[int] = f"""down_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :str = f"""input_blocks.{current_layer}.1"""
_lowerCAmelCase :Optional[Any] = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Union[str, Any] = f"""down_blocks.{i}.downsamplers.0"""
_lowerCAmelCase :Tuple = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
_lowerCAmelCase :Dict = current_channels
# hardcoded the mid-block for now
_lowerCAmelCase :int = 'mid_block.resnets.0'
_lowerCAmelCase :Optional[Any] = 'middle_block.0'
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Optional[int] = 'mid_block.attentions.0'
_lowerCAmelCase :Optional[int] = 'middle_block.1'
_lowerCAmelCase :List[Any] = convert_attention(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Union[str, Any] = 'mid_block.resnets.1'
_lowerCAmelCase :Optional[int] = 'middle_block.2'
_lowerCAmelCase :int = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = 0
_lowerCAmelCase :str = unet_config['up_block_types']
for i, layer_type in enumerate(__magic_name__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Optional[Any] = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :Any = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Any = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer-1}.1"""
_lowerCAmelCase :Tuple = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Tuple = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[str] = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :str = f"""up_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :List[Any] = f"""output_blocks.{current_layer}.1"""
_lowerCAmelCase :int = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Optional[int] = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :int = f"""output_blocks.{current_layer-1}.2"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :str = checkpoint['out.0.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['out.0.bias']
_lowerCAmelCase :List[Any] = checkpoint['out.2.weight']
_lowerCAmelCase :Dict = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
a = parser.parse_args()
a = strabool(args.class_cond)
a = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
a = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
a = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
a = None
a = con_pt_to_diffuser(args.unet_path, unet_config)
a = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
a = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
a = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
a = CMStochasticIterativeScheduler(**scheduler_config)
a = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path) | 687 | 1 |
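# Hedged usage note (mine; script and checkpoint names illustrative only): the
# converter above is driven from the shell, and the checkpoint filename selects both
# the U-Net and scheduler configs, so it must contain "test", "imagenet64", or "256"
# together with "bedroom"/"cat", e.g.:
#
#   python convert_consistency_model.py \
#       --unet_path cd_imagenet64_lpips.pt \
#       --dump_path ./consistency-model-imagenet64 \
#       --class_cond True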
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
a = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
a = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Optional[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
_lowerCAmelCase :Tuple = self.diffusers_dir
shutil.copy(
os.path.join(_UpperCAmelCase , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :str = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=None ):
_lowerCAmelCase :int = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_lowerCAmelCase :Dict = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_lowerCAmelCase :Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_lowerCAmelCase :List[str] = black.format_str(_UpperCAmelCase , mode=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(_UpperCAmelCase , 'w' , newline='\n' ) as f:
f.write(_UpperCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_UpperCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_UpperCAmelCase )
with open(_UpperCAmelCase , 'r' ) as f:
self.assertTrue(f.read() , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :List[str] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
# Base copy consistency
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , _UpperCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , )
# Copy consistency with a really long name
_lowerCAmelCase :Optional[int] = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , _UpperCAmelCase , _UpperCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , _UpperCAmelCase , overwrite_result=re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , ) | 687 | 1 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :List[str] = torch.load(__magic_name__ , map_location='cpu' )
_lowerCAmelCase :Optional[Any] = chkpt['model']
# We have the base model one level deeper than the original XLM repository
_lowerCAmelCase :Dict = {}
for k, v in state_dict.items():
if "pred_layer" in k:
_lowerCAmelCase :Any = v
else:
_lowerCAmelCase :Dict = v
_lowerCAmelCase :Optional[int] = chkpt['params']
    _lowerCAmelCase :str = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
_lowerCAmelCase :Dict = chkpt['dico_word2id']
_lowerCAmelCase :Any = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()}
# Save pytorch-model
_lowerCAmelCase :List[str] = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
_lowerCAmelCase :Tuple = pytorch_dump_folder_path + '/' + CONFIG_NAME
_lowerCAmelCase :Optional[Any] = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(f"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(config , indent=2 ) + '\n' )
    print(f"""Save vocab file to {pytorch_vocab_dump_path}""" )
    with open(pytorch_vocab_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(vocab , indent=2 ) + '\n' )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path) | 687 |
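# Worked example (mine) of the vocab rewrite above: BPE continuation pieces lose
# their "@@" marker, while every other token past the first 14 special ids gains a
# "</w>" end-of-word suffix.
vocab = {"hel@@": 20, "lo": 21, "<s>": 0}
rewritten = {
    s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i
    for s, i in vocab.items()
}
assert rewritten == {"hel": 20, "lo</w>": 21, "<s>": 0}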
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase : Optional[str] = field(
default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCamelCase : Optional[int] = field(
default=1_00_00 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate for training.'} )
    lowerCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate scheduler type.'} )
lowerCamelCase : Optional[int] = field(
default=7_50 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase : Optional[int] = field(
default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase : Optional[int] = field(default=5_00_00 , metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCamelCase : Optional[int] = field(
default=10_24 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase : Optional[int] = field(default=2_56 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase : Optional[int] = field(
default=2_00 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase : Optional[str] = field(
        default='eval_results.json' , metadata={'help': 'File in which to store the evaluation results.'} )
lowerCamelCase : Optional[str] = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase : Optional[str] = field(
        default='codeparrot-clean' , metadata={'help': 'Folder to save the processed dataset.'} )
lowerCamelCase : Optional[int] = field(
default=10_00_00 , metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[float] = field(
default=10_00 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1_00 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase : Optional[float] = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[int] = field(default=20_00_00 , metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCamelCase : Optional[int] = field(
        default=3_27_68 , metadata={'help': 'Vocabulary size of the new tokenizer.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase : Optional[str] = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
    lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for pretokenization.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} ) | 687 | 1 |
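# Hedged usage sketch (mine; `TinyArgs` is illustrative): each dataclass above is
# meant to be fed to transformers.HfArgumentParser, which turns every
# `field(default=..., metadata={"help": ...})` into a CLI flag:
from dataclasses import dataclass, field

from transformers import HfArgumentParser

@dataclass
class TinyArgs:
    model_ckpt: str = field(default="codeparrot/codeparrot", metadata={"help": "Model name."})
    train_batch_size: int = field(default=2, metadata={"help": "Batch size."})

(args,) = HfArgumentParser(TinyArgs).parse_args_into_dataclasses(["--train_batch_size", "8"])
print(args.model_ckpt, args.train_batch_size)  # codeparrot/codeparrot 8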
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Tuple = BlenderbotConfig
lowerCamelCase : List[str] = {}
lowerCamelCase : List[str] = 'gelu'
def __init__( self: int , _UpperCAmelCase: Dict , _UpperCAmelCase: List[str]=13 , _UpperCAmelCase: Union[str, Any]=7 , _UpperCAmelCase: int=True , _UpperCAmelCase: Tuple=False , _UpperCAmelCase: int=99 , _UpperCAmelCase: List[Any]=32 , _UpperCAmelCase: str=2 , _UpperCAmelCase: Union[str, Any]=4 , _UpperCAmelCase: List[str]=37 , _UpperCAmelCase: Optional[Any]=0.1 , _UpperCAmelCase: Any=0.1 , _UpperCAmelCase: str=20 , _UpperCAmelCase: Optional[Any]=2 , _UpperCAmelCase: int=1 , _UpperCAmelCase: int=0 , ):
_lowerCAmelCase :Optional[Any] = parent
_lowerCAmelCase :int = batch_size
_lowerCAmelCase :Any = seq_length
_lowerCAmelCase :List[str] = is_training
_lowerCAmelCase :Union[str, Any] = use_labels
_lowerCAmelCase :Any = vocab_size
_lowerCAmelCase :Union[str, Any] = hidden_size
_lowerCAmelCase :Union[str, Any] = num_hidden_layers
_lowerCAmelCase :Union[str, Any] = num_attention_heads
_lowerCAmelCase :Dict = intermediate_size
_lowerCAmelCase :int = hidden_dropout_prob
_lowerCAmelCase :Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase :Optional[int] = max_position_embeddings
_lowerCAmelCase :Optional[int] = eos_token_id
_lowerCAmelCase :List[Any] = pad_token_id
_lowerCAmelCase :List[str] = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowerCAmelCase :Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCAmelCase :List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowerCAmelCase :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase :Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_lowerCAmelCase :Optional[Any] = prepare_blenderbot_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Any ):
_lowerCAmelCase :Optional[int] = TFBlenderbotModel(config=_UpperCAmelCase ).get_decoder()
_lowerCAmelCase :str = inputs_dict['input_ids']
_lowerCAmelCase :Tuple = input_ids[:1, :]
_lowerCAmelCase :Dict = inputs_dict['attention_mask'][:1, :]
_lowerCAmelCase :List[Any] = inputs_dict['head_mask']
_lowerCAmelCase :Dict = 1
# first forward pass
_lowerCAmelCase :List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase :Optional[int] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowerCAmelCase :str = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCAmelCase :Tuple = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
_lowerCAmelCase :Optional[int] = tf.concat([input_ids, next_tokens] , axis=-1 )
_lowerCAmelCase :Optional[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowerCAmelCase :Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
_lowerCAmelCase :Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowerCAmelCase :str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowerCAmelCase :Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
_lowerCAmelCase :int = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1e-3 )
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any]=None , __magic_name__ : Dict=None , __magic_name__ : List[str]=None , __magic_name__ : int=None , __magic_name__ : Dict=None , ):
"""simple docstring"""
if attention_mask is None:
_lowerCAmelCase :Dict = tf.cast(tf.math.not_equal(__magic_name__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_lowerCAmelCase :str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_lowerCAmelCase :Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowerCAmelCase :List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowerCAmelCase :Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[Any] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
lowerCamelCase : Optional[Any] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
lowerCamelCase : str = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Any = False
lowerCamelCase : Any = False
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Tuple = TFBlenderbotModelTester(self )
_lowerCAmelCase :Union[str, Any] = ConfigTester(self , config_class=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_tokenizers
@require_tf
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = ['My friends are cool but they eat too many carbs.']
lowerCamelCase : Tuple = 'facebook/blenderbot-400M-distill'
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Any = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Dict = self.tokenizer(self.src_text , return_tensors='tf' )
_lowerCAmelCase :int = self.model.generate(
model_inputs.input_ids , )
_lowerCAmelCase :List[str] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_UpperCAmelCase )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
) | 687 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :List[str] = 'ylacombe/bark-small'
_lowerCAmelCase :int = tempfile.mkdtemp()
_lowerCAmelCase :List[str] = 'en_speaker_1'
_lowerCAmelCase :Union[str, Any] = 'This is a test string'
_lowerCAmelCase :List[Any] = 'speaker_embeddings_path.json'
_lowerCAmelCase :str = 'speaker_embeddings'
def SCREAMING_SNAKE_CASE__ ( self: str , **_UpperCAmelCase: Optional[Any] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :List[Any] = self.get_tokenizer()
_lowerCAmelCase :List[str] = BarkProcessor(tokenizer=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase :Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCAmelCase :Any = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase :List[Any] = 35
_lowerCAmelCase :Optional[int] = 2
_lowerCAmelCase :Dict = 8
_lowerCAmelCase :Dict = {
'semantic_prompt': np.ones(_UpperCAmelCase ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase :int = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase :Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Tuple = self.get_tokenizer()
_lowerCAmelCase :Union[str, Any] = BarkProcessor(tokenizer=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = processor(text=self.input_string )
_lowerCAmelCase :List[str] = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() ) | 687 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
a = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: List[Any] , _UpperCAmelCase: AutoencoderKL , _UpperCAmelCase: CLIPTextModel , _UpperCAmelCase: CLIPTokenizer , _UpperCAmelCase: UNetaDConditionModel , _UpperCAmelCase: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _UpperCAmelCase: StableDiffusionSafetyChecker , _UpperCAmelCase: CLIPImageProcessor , ):
super().__init__()
self.register_modules(
vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , )
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase :List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
self.enable_attention_slicing(_UpperCAmelCase )
@torch.no_grad()
def __call__( self: Optional[Any] , _UpperCAmelCase: Union[str, List[str]] , _UpperCAmelCase: int = 512 , _UpperCAmelCase: int = 512 , _UpperCAmelCase: int = 50 , _UpperCAmelCase: float = 7.5 , _UpperCAmelCase: Optional[Union[str, List[str]]] = None , _UpperCAmelCase: Optional[int] = 1 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Optional[torch.Generator] = None , _UpperCAmelCase: Optional[torch.FloatTensor] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , _UpperCAmelCase: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _UpperCAmelCase: int = 1 , _UpperCAmelCase: Optional[torch.FloatTensor] = None , **_UpperCAmelCase: List[str] , ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :Union[str, Any] = 1
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :Tuple = len(_UpperCAmelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_UpperCAmelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(_UpperCAmelCase )}.""" )
# get prompt text embeddings
_lowerCAmelCase :List[Any] = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
_lowerCAmelCase :Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowerCAmelCase :int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
_lowerCAmelCase :Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
_lowerCAmelCase :Dict = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Optional[int] = text_embeddings.shape
_lowerCAmelCase :Tuple = text_embeddings.repeat(1 , _UpperCAmelCase , 1 )
_lowerCAmelCase :Dict = text_embeddings.view(bs_embed * num_images_per_prompt , _UpperCAmelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase :str = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase :List[str]
if negative_prompt is None:
_lowerCAmelCase :Dict = ['']
elif type(_UpperCAmelCase ) is not type(_UpperCAmelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(_UpperCAmelCase )} !="""
f""" {type(_UpperCAmelCase )}.""" )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :Tuple = [negative_prompt]
elif batch_size != len(_UpperCAmelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(_UpperCAmelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
' the batch size of `prompt`.' )
else:
_lowerCAmelCase :List[str] = negative_prompt
_lowerCAmelCase :Optional[int] = text_input_ids.shape[-1]
_lowerCAmelCase :List[Any] = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase :int = uncond_embeddings.shape[1]
_lowerCAmelCase :Any = uncond_embeddings.repeat(_UpperCAmelCase , _UpperCAmelCase , 1 )
_lowerCAmelCase :int = uncond_embeddings.view(batch_size * num_images_per_prompt , _UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase :Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase :List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase :Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
_lowerCAmelCase :Union[str, Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_lowerCAmelCase :List[Any] = torch.randn(
_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to(self.device )
_lowerCAmelCase :str = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to(
self.device )
else:
_lowerCAmelCase :Optional[Any] = torch.randn(
_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
_lowerCAmelCase :List[str] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCAmelCase :Optional[Any] = latents_reference.to(self.device )
_lowerCAmelCase :Union[str, Any] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
_lowerCAmelCase :List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2
_lowerCAmelCase :Dict = (latents_shape[2] - latents_shape_reference[2]) // 2
_lowerCAmelCase :List[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
_lowerCAmelCase :List[Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
_lowerCAmelCase :str = 0 if dx < 0 else dx
_lowerCAmelCase :Optional[Any] = 0 if dy < 0 else dy
_lowerCAmelCase :Any = max(-dx , 0 )
_lowerCAmelCase :Any = max(-dy , 0 )
# import pdb
# pdb.set_trace()
_lowerCAmelCase :Tuple = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_lowerCAmelCase :str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase :List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase :Optional[int] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase :Optional[Any] = {}
if accepts_eta:
_lowerCAmelCase :Dict = eta
for i, t in enumerate(self.progress_bar(_UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase :Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase :Tuple = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :List[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase :Optional[int] = noise_pred.chunk(2 )
_lowerCAmelCase :Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase :List[str] = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = 1 / 0.1_8_2_1_5 * latents
_lowerCAmelCase :Union[str, Any] = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase :List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
_lowerCAmelCase :Dict = self.feature_extractor(self.numpy_to_pil(_UpperCAmelCase ) , return_tensors='pt' ).to(
self.device )
_lowerCAmelCase , _lowerCAmelCase :Any = self.safety_checker(
images=_UpperCAmelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
_lowerCAmelCase :Tuple = None
if output_type == "pil":
_lowerCAmelCase :int = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase ) | 687 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : int = 'bert'
def __init__( self: Optional[Any] , _UpperCAmelCase: Tuple=3_0522 , _UpperCAmelCase: int=768 , _UpperCAmelCase: Union[str, Any]=12 , _UpperCAmelCase: Dict=12 , _UpperCAmelCase: List[Any]=3072 , _UpperCAmelCase: List[Any]="gelu" , _UpperCAmelCase: Union[str, Any]=0.1 , _UpperCAmelCase: Dict=0.1 , _UpperCAmelCase: List[Any]=512 , _UpperCAmelCase: Optional[Any]=2 , _UpperCAmelCase: Optional[int]=0.0_2 , _UpperCAmelCase: Any=1e-1_2 , _UpperCAmelCase: Optional[Any]=0 , _UpperCAmelCase: Union[str, Any]="absolute" , _UpperCAmelCase: Dict=True , _UpperCAmelCase: Optional[Any]=None , **_UpperCAmelCase: Optional[int] , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :List[Any] = vocab_size
_lowerCAmelCase :Tuple = hidden_size
_lowerCAmelCase :Dict = num_hidden_layers
_lowerCAmelCase :Optional[Any] = num_attention_heads
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :int = intermediate_size
_lowerCAmelCase :Tuple = hidden_dropout_prob
_lowerCAmelCase :Tuple = attention_probs_dropout_prob
_lowerCAmelCase :List[Any] = max_position_embeddings
_lowerCAmelCase :Dict = type_vocab_size
_lowerCAmelCase :Any = initializer_range
_lowerCAmelCase :int = layer_norm_eps
_lowerCAmelCase :List[Any] = position_embedding_type
_lowerCAmelCase :int = use_cache
_lowerCAmelCase :Union[str, Any] = classifier_dropout
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
if self.task == "multiple-choice":
_lowerCAmelCase :List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase :Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] ) | 687 | 1 |
import gc
import threading
import time
import psutil
import torch
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: List[str] ):
_lowerCAmelCase :Union[str, Any] = psutil.Process()
_lowerCAmelCase :int = False
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Any = -1
while True:
_lowerCAmelCase :List[str] = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Optional[Any] = True
_lowerCAmelCase :Tuple = threading.Thread(target=self.peak_monitor )
_lowerCAmelCase :Tuple = True
self.thread.start()
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[Any] = False
self.thread.join()
return self.cpu_memory_peak
a = PeakCPUMemory()
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Dict = {'time': time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
_lowerCAmelCase :int = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
_lowerCAmelCase :Tuple = torch.cuda.memory_allocated(__magic_name__ )
torch.cuda.reset_peak_memory_stats()
return measures
def UpperCamelCase_( __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :List[str] = {'time': time.time() - start_measures['time']}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
_lowerCAmelCase :Union[str, Any] = (psutil.Process().memory_info().rss - start_measures['cpu']) / 2**20
_lowerCAmelCase :Any = (cpu_peak_tracker.stop() - start_measures['cpu']) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
_lowerCAmelCase :str = (torch.cuda.memory_allocated(__magic_name__ ) - start_measures[str(__magic_name__ )]) / 2**20
_lowerCAmelCase :int = (torch.cuda.max_memory_allocated(__magic_name__ ) - start_measures[str(__magic_name__ )]) / 2**20
return measures
def UpperCamelCase_( __magic_name__ : Any , __magic_name__ : Dict ):
"""simple docstring"""
print(f"""{description}:""" )
print(f"""- Time: {measures["time"]:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(f"""- GPU {i} allocated: {measures[str(__magic_name__ )]:.2f}MiB""" )
_lowerCAmelCase :Optional[Any] = measures[f"""{i}-peak"""]
print(f"""- GPU {i} peak: {peak:.2f}MiB""" )
print(f"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" )
print(f"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" ) | 687 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ):
"""simple docstring"""
if isinstance(__magic_name__ , torch.Tensor ):
return image
elif isinstance(__magic_name__ , PIL.Image.Image ):
_lowerCAmelCase :Tuple = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCAmelCase :List[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowerCAmelCase :Optional[Any] = np.concatenate(__magic_name__ , axis=0 )
_lowerCAmelCase :Any = np.array(__magic_name__ ).astype(np.floataa ) / 255.0
_lowerCAmelCase :Optional[int] = image.transpose(0 , 3 , 1 , 2 )
_lowerCAmelCase :int = 2.0 * image - 1.0
_lowerCAmelCase :Optional[int] = torch.from_numpy(__magic_name__ )
elif isinstance(image[0] , torch.Tensor ):
_lowerCAmelCase :str = torch.cat(__magic_name__ , dim=0 )
return image
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : int=0.9995 ):
"""simple docstring"""
if not isinstance(__magic_name__ , np.ndarray ):
_lowerCAmelCase :Tuple = True
_lowerCAmelCase :str = va.device
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :Any = np.sum(va * va / (np.linalg.norm(__magic_name__ ) * np.linalg.norm(__magic_name__ )) )
if np.abs(__magic_name__ ) > DOT_THRESHOLD:
_lowerCAmelCase :Optional[Any] = (1 - t) * va + t * va
else:
_lowerCAmelCase :int = np.arccos(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = np.sin(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = theta_a * t
_lowerCAmelCase :str = np.sin(__magic_name__ )
_lowerCAmelCase :Any = np.sin(theta_a - theta_t ) / sin_theta_a
_lowerCAmelCase :Optional[Any] = sin_theta_t / sin_theta_a
_lowerCAmelCase :List[Any] = sa * va + sa * va
if inputs_are_torch:
_lowerCAmelCase :int = torch.from_numpy(__magic_name__ ).to(__magic_name__ )
return va
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Any = F.normalize(__magic_name__ , dim=-1 )
_lowerCAmelCase :str = F.normalize(__magic_name__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
for param in model.parameters():
_lowerCAmelCase :List[str] = value
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: Any , _UpperCAmelCase: AutoencoderKL , _UpperCAmelCase: CLIPTextModel , _UpperCAmelCase: CLIPModel , _UpperCAmelCase: CLIPTokenizer , _UpperCAmelCase: UNetaDConditionModel , _UpperCAmelCase: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _UpperCAmelCase: CLIPFeatureExtractor , _UpperCAmelCase: str=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Union[str, Any]=None , ):
super().__init__()
self.register_modules(
vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , clip_model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , coca_model=_UpperCAmelCase , coca_tokenizer=_UpperCAmelCase , coca_transform=_UpperCAmelCase , )
_lowerCAmelCase :int = (
feature_extractor.size
if isinstance(feature_extractor.size , _UpperCAmelCase )
else feature_extractor.size['shortest_edge']
)
_lowerCAmelCase :Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _UpperCAmelCase )
set_requires_grad(self.clip_model , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase :Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
self.enable_attention_slicing(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Dict ):
# get the original timestep using init_timestep
_lowerCAmelCase :Optional[Any] = min(int(num_inference_steps * strength ) , _UpperCAmelCase )
_lowerCAmelCase :List[str] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase :Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Union[str, Any]=None ):
if not isinstance(_UpperCAmelCase , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(_UpperCAmelCase )}""" )
_lowerCAmelCase :Union[str, Any] = image.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :List[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCAmelCase )
]
_lowerCAmelCase :List[str] = torch.cat(_UpperCAmelCase , dim=0 )
else:
_lowerCAmelCase :List[str] = self.vae.encode(_UpperCAmelCase ).latent_dist.sample(_UpperCAmelCase )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase :List[Any] = 0.1_8_2_1_5 * init_latents
_lowerCAmelCase :List[Any] = init_latents.repeat_interleave(_UpperCAmelCase , dim=0 )
_lowerCAmelCase :Dict = randn_tensor(init_latents.shape , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
# get latents
_lowerCAmelCase :Dict = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :List[str] = init_latents
return latents
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, Any] ):
_lowerCAmelCase :Optional[int] = self.coca_transform(_UpperCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCAmelCase :Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_lowerCAmelCase :int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] ):
_lowerCAmelCase :Optional[int] = self.feature_extractor.preprocess(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
_lowerCAmelCase :List[str] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Dict = image_embeddings_clip.repeat_interleave(_UpperCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , _UpperCAmelCase: Dict , _UpperCAmelCase: str , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , ):
_lowerCAmelCase :Dict = latents.detach().requires_grad_()
_lowerCAmelCase :Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowerCAmelCase :int = self.scheduler.alphas_cumprod[timestep]
_lowerCAmelCase :Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase :str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCAmelCase :Optional[Any] = torch.sqrt(_UpperCAmelCase )
_lowerCAmelCase :List[str] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Dict = self.scheduler.sigmas[index]
_lowerCAmelCase :Optional[Any] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase :Tuple = 1 / 0.1_8_2_1_5 * sample
_lowerCAmelCase :Optional[Any] = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Tuple = transforms.Resize(self.feature_extractor_size )(_UpperCAmelCase )
_lowerCAmelCase :Tuple = self.normalize(_UpperCAmelCase ).to(latents.dtype )
_lowerCAmelCase :List[Any] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Tuple = spherical_dist_loss(_UpperCAmelCase , _UpperCAmelCase ).mean() * clip_guidance_scale
_lowerCAmelCase :str = -torch.autograd.grad(_UpperCAmelCase , _UpperCAmelCase )[0]
if isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Union[str, Any] = latents.detach() + grads * (sigma**2)
_lowerCAmelCase :Dict = noise_pred_original
else:
_lowerCAmelCase :Optional[int] = noise_pred_original - torch.sqrt(_UpperCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self: Optional[int] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: float = 0.6 , _UpperCAmelCase: Optional[int] = 50 , _UpperCAmelCase: Optional[float] = 7.5 , _UpperCAmelCase: Optional[int] = 1 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Optional[float] = 100 , _UpperCAmelCase: Optional[torch.Generator] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , _UpperCAmelCase: float = 0.8 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(_UpperCAmelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(_UpperCAmelCase , torch.Generator ) and batch_size > 1:
_lowerCAmelCase :int = [generator] + [None] * (batch_size - 1)
_lowerCAmelCase :List[Any] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowerCAmelCase :Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowerCAmelCase :List[str] = ', '.join(_UpperCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :List[Any] = self.get_image_description(_UpperCAmelCase )
if style_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :Any = self.get_image_description(_UpperCAmelCase )
# get prompt text embeddings for content and style
_lowerCAmelCase :Any = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :int = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :List[str] = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase :str = text_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# set timesteps
_lowerCAmelCase :Any = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowerCAmelCase :Dict = {}
if accepts_offset:
_lowerCAmelCase :Optional[int] = 1
self.scheduler.set_timesteps(_UpperCAmelCase , **_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , self.device )
_lowerCAmelCase :int = timesteps[:1].repeat(_UpperCAmelCase )
# Preprocess image
_lowerCAmelCase :Dict = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :int = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :Any = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :str = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if clip_guidance_scale > 0:
_lowerCAmelCase :Optional[Any] = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Any = slerp(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase :int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase :Optional[int] = content_text_input.input_ids.shape[-1]
_lowerCAmelCase :Union[str, Any] = self.tokenizer([''] , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='pt' )
_lowerCAmelCase :Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCAmelCase :Optional[int] = uncond_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase :int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase :Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase :Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCAmelCase :Any = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to(
self.device )
else:
_lowerCAmelCase :List[Any] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCAmelCase :int = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase :Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase :Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase :Any = {}
if accepts_eta:
_lowerCAmelCase :Any = eta
# check if the scheduler accepts generator
_lowerCAmelCase :List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowerCAmelCase :List[Any] = generator
with self.progress_bar(total=_UpperCAmelCase ):
for i, t in enumerate(_UpperCAmelCase ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase :Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase :Tuple = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase :List[str] = noise_pred.chunk(2 )
_lowerCAmelCase :Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCAmelCase :List[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.cond_fn(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase :str = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase :str = 1 / 0.1_8_2_1_5 * latents
_lowerCAmelCase :Any = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[str] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase :List[Any] = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase ) | 687 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
a = logging.get_logger(__name__)
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : Dict , __magic_name__ : List[str]=None , __magic_name__ : int=None ):
"""simple docstring"""
if "." in tensor_name:
_lowerCAmelCase :Optional[Any] = tensor_name.split('.' )
for split in splits[:-1]:
_lowerCAmelCase :Tuple = getattr(__magic_name__ , __magic_name__ )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
_lowerCAmelCase :Optional[Any] = new_module
_lowerCAmelCase :Optional[int] = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
_lowerCAmelCase :str = tensor_name in module._buffers
_lowerCAmelCase :Any = getattr(__magic_name__ , __magic_name__ )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
_lowerCAmelCase :int = False
_lowerCAmelCase :List[Any] = False
if is_buffer or not is_bitsandbytes_available():
_lowerCAmelCase :Optional[Any] = False
_lowerCAmelCase :Optional[int] = False
else:
_lowerCAmelCase :int = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
_lowerCAmelCase :Union[str, Any] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
_lowerCAmelCase :List[Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
_lowerCAmelCase :Optional[Any] = old_value.to(__magic_name__ )
elif isinstance(__magic_name__ , torch.Tensor ):
_lowerCAmelCase :Any = value.to('cpu' )
if value.dtype == torch.inta:
_lowerCAmelCase :str = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
_lowerCAmelCase :List[Any] = torch.tensor(__magic_name__ , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , __magic_name__ ) and fpaa_statistics is None:
_lowerCAmelCase :Dict = new_value.T
_lowerCAmelCase :Dict = old_value.__dict__
if is_abit:
_lowerCAmelCase :List[str] = bnb.nn.IntaParams(__magic_name__ , requires_grad=__magic_name__ , **__magic_name__ ).to(__magic_name__ )
elif is_abit:
_lowerCAmelCase :Any = bnb.nn.Paramsabit(__magic_name__ , requires_grad=__magic_name__ , **__magic_name__ ).to(__magic_name__ )
_lowerCAmelCase :List[Any] = new_value
if fpaa_statistics is not None:
setattr(module.weight , 'SCB' , fpaa_statistics.to(__magic_name__ ) )
else:
if value is None:
_lowerCAmelCase :str = old_value.to(__magic_name__ )
elif isinstance(__magic_name__ , torch.Tensor ):
_lowerCAmelCase :Dict = value.to(__magic_name__ )
else:
_lowerCAmelCase :Any = torch.tensor(__magic_name__ , device=__magic_name__ )
if is_buffer:
_lowerCAmelCase :List[Any] = new_value
else:
_lowerCAmelCase :Any = nn.Parameter(__magic_name__ , requires_grad=old_value.requires_grad )
_lowerCAmelCase :List[Any] = new_value
def UpperCamelCase_( __magic_name__ : Dict , __magic_name__ : Optional[int]=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : Optional[Any]=None , __magic_name__ : int=False ):
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
_lowerCAmelCase :Optional[Any] = []
current_key_name.append(__magic_name__ )
if (isinstance(__magic_name__ , nn.Linear ) or isinstance(__magic_name__ , __magic_name__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(__magic_name__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__magic_name__ , __magic_name__ ):
_lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = module.weight.shape
else:
_lowerCAmelCase :str = module.in_features
_lowerCAmelCase :Dict = module.out_features
if quantization_config.quantization_method() == "llm_int8":
_lowerCAmelCase :Dict = bnb.nn.LinearabitLt(
__magic_name__ , __magic_name__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
_lowerCAmelCase :Optional[Any] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
_lowerCAmelCase :Tuple = bnb.nn.Linearabit(
__magic_name__ , __magic_name__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
_lowerCAmelCase :Tuple = True
# Store the module class in case we need to transpose the weight later
_lowerCAmelCase :Any = type(__magic_name__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__magic_name__ )
if len(list(module.children() ) ) > 0:
_lowerCAmelCase , _lowerCAmelCase :str = _replace_with_bnb_linear(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_been_replaced=__magic_name__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Optional[Any]=None , __magic_name__ : Tuple=None , __magic_name__ : Union[str, Any]=None ):
"""simple docstring"""
_lowerCAmelCase :Any = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
_lowerCAmelCase , _lowerCAmelCase :Optional[int] = _replace_with_bnb_linear(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def UpperCamelCase_( *__magic_name__ : int , **__magic_name__ : Any ):
"""simple docstring"""
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , __magic_name__ , )
return replace_with_bnb_linear(*__magic_name__ , **__magic_name__ )
def UpperCamelCase_( *__magic_name__ : int , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , __magic_name__ , )
return set_module_quantized_tensor_to_device(*__magic_name__ , **__magic_name__ )
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Dict = deepcopy(__magic_name__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
_lowerCAmelCase :Tuple = find_tied_parameters(__magic_name__ )
# For compatibility with Accelerate < 0.18
if isinstance(__magic_name__ , __magic_name__ ):
_lowerCAmelCase :str = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_lowerCAmelCase :Any = sum(__magic_name__ , [] )
_lowerCAmelCase :Optional[int] = len(__magic_name__ ) > 0
# Check if it is a base model
_lowerCAmelCase :int = not hasattr(__magic_name__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_lowerCAmelCase :List[str] = list(model.named_children() )
_lowerCAmelCase :Union[str, Any] = [list_modules[-1][0]]
# add last module together with tied weights
_lowerCAmelCase :Union[str, Any] = set(__magic_name__ ) - set(__magic_name__ )
_lowerCAmelCase :Dict = list(set(__magic_name__ ) ) + list(__magic_name__ )
# remove ".weight" from the keys
_lowerCAmelCase :Dict = ['.weight', '.bias']
_lowerCAmelCase :int = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_lowerCAmelCase :List[str] = name.replace(__magic_name__ , '' )
filtered_module_names.append(__magic_name__ )
return filtered_module_names | 687 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = list(__magic_name__ )
_lowerCAmelCase :Dict = list(__magic_name__ )
_lowerCAmelCase :Any = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != lista[i]:
count += 1
_lowerCAmelCase :Union[str, Any] = '_'
if count > 1:
return False
else:
return "".join(__magic_name__ )
def UpperCamelCase_( __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :int = []
while True:
_lowerCAmelCase :str = ['$'] * len(__magic_name__ )
_lowerCAmelCase :Optional[int] = []
for i in range(len(__magic_name__ ) ):
for j in range(i + 1 , len(__magic_name__ ) ):
_lowerCAmelCase :int = compare_string(binary[i] , binary[j] )
if k is False:
_lowerCAmelCase :str = '*'
_lowerCAmelCase :Union[str, Any] = '*'
temp.append('X' )
for i in range(len(__magic_name__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__magic_name__ ) == 0:
return pi
_lowerCAmelCase :Any = list(set(__magic_name__ ) )
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : Sequence[float] ):
"""simple docstring"""
_lowerCAmelCase :str = []
for minterm in minterms:
_lowerCAmelCase :Any = ''
for _ in range(__magic_name__ ):
_lowerCAmelCase :Tuple = str(minterm % 2 ) + string
minterm //= 2
temp.append(__magic_name__ )
return temp
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = list(__magic_name__ )
_lowerCAmelCase :List[Any] = list(__magic_name__ )
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCamelCase_( __magic_name__ : list[list[int]] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = []
_lowerCAmelCase :List[str] = [0] * len(__magic_name__ )
for i in range(len(chart[0] ) ):
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Optional[Any] = -1
for j in range(len(__magic_name__ ) ):
if chart[j][i] == 1:
count += 1
_lowerCAmelCase :List[Any] = j
if count == 1:
_lowerCAmelCase :Dict = 1
for i in range(len(__magic_name__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__magic_name__ ) ):
_lowerCAmelCase :Dict = 0
temp.append(prime_implicants[i] )
while True:
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Any = -1
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = chart[i].count(1 )
if count_n > max_n:
_lowerCAmelCase :Optional[Any] = count_n
_lowerCAmelCase :Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = 0
def UpperCamelCase_( __magic_name__ : list[str] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = [[0 for x in range(len(__magic_name__ ) )] for x in range(len(__magic_name__ ) )]
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :Tuple = prime_implicants[i].count('_' )
for j in range(len(__magic_name__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , __magic_name__ ):
_lowerCAmelCase :str = 1
return chart
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Tuple = int(input('Enter the no. of variables\n' ) )
_lowerCAmelCase :Tuple = [
float(__magic_name__ )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
_lowerCAmelCase :List[str] = decimal_to_binary(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Any = check(__magic_name__ )
print('Prime Implicants are:' )
print(__magic_name__ )
_lowerCAmelCase :List[Any] = prime_implicant_chart(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = selection(__magic_name__ , __magic_name__ )
print('Essential Prime Implicants are:' )
print(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 687 | 1 |
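For reference, a minimal self-contained sketch of the same Quine-McCluskey merge step with readable names (merge_terms and find_prime_implicants are illustrative, not part of the source): two implicants combine when they differ in exactly one bit, and implicants that never combine are prime.
def merge_terms(term_a: str, term_b: str):
    """Merge two implicants that differ in exactly one position, else return None."""
    merged = list(term_a)
    diff = 0
    for i, (x, y) in enumerate(zip(term_a, term_b)):
        if x != y:
            diff += 1
            merged[i] = "_"
    return "".join(merged) if diff == 1 else None
def find_prime_implicants(terms: list) -> list:
    """Repeatedly merge terms; anything that never merges is a prime implicant."""
    prime = []
    while terms:
        used = [False] * len(terms)
        merged = set()
        for i in range(len(terms)):
            for j in range(i + 1, len(terms)):
                pattern = merge_terms(terms[i], terms[j])
                if pattern is not None:
                    used[i] = used[j] = True
                    merged.add(pattern)
        prime += [t for i, t in enumerate(terms) if not used[i]]
        terms = list(merged)
    return prime
# Minterms 1, 3, 4, 5, 6, 7 of a 3-variable function:
print(sorted(find_prime_implicants(["001", "011", "100", "101", "110", "111"])))
# ['1__', '__1']: first-bit-1 covers minterms 4..7, last-bit-1 covers 1, 3, 5, 7.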
def UpperCamelCase_( __magic_name__ : list ):
"""simple docstring"""
if len(__magic_name__ ) <= 1:
return [tuple(__magic_name__ )]
_lowerCAmelCase :Optional[int] = []
def generate(__magic_name__ : int , __magic_name__ : list ):
_lowerCAmelCase :Optional[int] = [0] * n
res.append(tuple(__magic_name__ ) )
_lowerCAmelCase :int = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
_lowerCAmelCase , _lowerCAmelCase :str = arr[i], arr[0]
else:
_lowerCAmelCase , _lowerCAmelCase :Dict = arr[i], arr[c[i]]
res.append(tuple(__magic_name__ ) )
c[i] += 1
_lowerCAmelCase :Optional[Any] = 0
else:
_lowerCAmelCase :Dict = 0
i += 1
generate(len(__magic_name__ ) , __magic_name__ )
return res
if __name__ == "__main__":
a = input("""Enter numbers separated by a comma:\n""").strip()
a = [int(item) for item in user_input.split(""",""")]
print(heaps(arr)) | 687 |
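Traced by hand (reading each placeholder assignment pair as an in-place swap), the iterative generator above derives every permutation from its predecessor by a single exchange:
# heaps([1, 2, 3]) ->
#   [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
# All 3! = 6 permutations, each one swap away from the previous tuple.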
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
a = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
a = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[int]=4 , _UpperCAmelCase: Optional[int]=False ):
_lowerCAmelCase :Any = compute_bleu(
reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) :Tuple = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
} | 687 | 1 |
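Under the hood the reported score is a geometric mean of clipped n-gram precisions scaled by a brevity penalty. A standalone sketch of that arithmetic for a single sentence pair (illustrative only, not the imported NMT implementation):
import math
from collections import Counter
def bleu_sketch(candidate: list, reference: list, max_order: int = 4) -> float:
    """Single-candidate, single-reference BLEU; illustrative only."""
    precisions = []
    for n in range(1, max_order + 1):
        cand = Counter(tuple(candidate[i : i + n]) for i in range(len(candidate) - n + 1))
        ref = Counter(tuple(reference[i : i + n]) for i in range(len(reference) - n + 1))
        overlap = sum((cand & ref).values())  # clipped n-gram matches
        precisions.append(overlap / max(sum(cand.values()), 1))
    if min(precisions) == 0:
        return 0.0
    geo_mean = math.exp(sum(math.log(p) for p in precisions) / max_order)
    c, r = len(candidate), len(reference)
    brevity_penalty = 1.0 if c > r else math.exp(1 - r / c)
    return brevity_penalty * geo_mean
print(bleu_sketch("hello there general kenobi".split(), "hello there general kenobi".split()))  # 1.0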
from typing import Any
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: int , _UpperCAmelCase: Any ):
_lowerCAmelCase :List[Any] = data
_lowerCAmelCase :str = None
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: Tuple ):
_lowerCAmelCase :Any = None
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :str = self.head
while temp is not None:
print(temp.data , end=' ' )
_lowerCAmelCase :Tuple = temp.next
print()
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: Any ):
_lowerCAmelCase :Optional[int] = Node(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = self.head
_lowerCAmelCase :Optional[int] = new_node
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Tuple , _UpperCAmelCase: List[str] ):
        if node_data_a == node_data_b:
            return
        else:
            _lowerCAmelCase :Tuple = self.head
            while node_a is not None and node_a.data != node_data_a:
                _lowerCAmelCase :Tuple = node_a.next
            _lowerCAmelCase :Any = self.head
            while node_b is not None and node_b.data != node_data_b:
                _lowerCAmelCase :List[str] = node_b.next
            if node_a is None or node_b is None:
                return
            _lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = node_b.data, node_a.data
if __name__ == "__main__":
a = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list() | 687 |
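For reference, the expected output of the demo above (push prepends, so 5..1 prints as 1..5, and swap_nodes exchanges node data while leaving the links untouched):
# 1 2 3 4 5
# After swapping
# 4 2 3 1 5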
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 | 1 |
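The _LazyModule indirection above defers the heavy torch import until a name is first touched. The same behaviour can be sketched with a module-level __getattr__ (PEP 562) inside a package's __init__.py; the structure below is a minimal illustration, not the transformers implementation:
import importlib
_import_structure = {"configuration_falcon": ["FalconConfig"]}
def __getattr__(name: str):
    # Import the declaring submodule only on first attribute access.
    for submodule, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")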
import operator as op
def UpperCamelCase_( __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = []
_lowerCAmelCase :Dict = lambda __magic_name__ , __magic_name__ : int(x / y ) # noqa: E731 integer division operation
_lowerCAmelCase :Dict = {
'^': op.pow,
'*': op.mul,
'/': div,
'+': op.add,
'-': op.sub,
} # operators & their respective operation
# print table header
print('Symbol'.center(8 ) , 'Action'.center(12 ) , 'Stack' , sep=' | ' )
print('-' * (30 + len(__magic_name__ )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(__magic_name__ ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('push(' + x + ')').ljust(12 ) , ','.join(__magic_name__ ) , sep=' | ' )
else:
_lowerCAmelCase :Union[str, Any] = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + b + ')').ljust(12 ) , ','.join(__magic_name__ ) , sep=' | ' )
_lowerCAmelCase :int = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + a + ')').ljust(12 ) , ','.join(__magic_name__ ) , sep=' | ' )
stack.append(
str(opr[x](int(__magic_name__ ) , int(__magic_name__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('push(' + a + x + b + ')').ljust(12 ) , ','.join(__magic_name__ ) , sep=' | ' , )
return int(stack[0] )
if __name__ == "__main__":
a = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix)) | 687 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self: str , _UpperCAmelCase: str , _UpperCAmelCase: Optional[int]=7 , _UpperCAmelCase: Union[str, Any]=3 , _UpperCAmelCase: int=18 , _UpperCAmelCase: List[Any]=30 , _UpperCAmelCase: List[Any]=400 , _UpperCAmelCase: Optional[Any]=True , _UpperCAmelCase: Any=None , _UpperCAmelCase: Any=True , _UpperCAmelCase: int=None , _UpperCAmelCase: Union[str, Any]=True , ):
_lowerCAmelCase :Tuple = size if size is not None else {'shortest_edge': 20}
_lowerCAmelCase :str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_lowerCAmelCase :str = parent
_lowerCAmelCase :List[Any] = batch_size
_lowerCAmelCase :Optional[Any] = num_channels
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :int = min_resolution
_lowerCAmelCase :List[str] = max_resolution
_lowerCAmelCase :List[str] = do_resize
_lowerCAmelCase :Optional[int] = size
_lowerCAmelCase :str = do_center_crop
_lowerCAmelCase :int = crop_size
_lowerCAmelCase :Optional[int] = do_flip_channel_order
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Any = MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self: str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_flip_channel_order' ) )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_lowerCAmelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self: int ):
# Initialize image_processing
_lowerCAmelCase :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_lowerCAmelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :str = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
# Initialize image_processing
_lowerCAmelCase :int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :List[str] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
# Initialize image_processing
_lowerCAmelCase :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :int = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , ) | 687 | 1 |
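The processor configured in these tests resizes so the shortest edge is 20, center-crops to 18x18, and flips RGB to BGR. A minimal numpy sketch of the crop-and-flip half (names are illustrative; the real class also handles resizing and rescaling):
import numpy as np
def center_crop_and_flip(image: np.ndarray, size: int = 18) -> np.ndarray:
    """image is height x width x channels; crop the center, then reverse channels."""
    h, w = image.shape[:2]
    top, left = (h - size) // 2, (w - size) // 2
    cropped = image[top : top + size, left : left + size]
    return cropped[..., ::-1]  # RGB -> BGR
print(center_crop_and_flip(np.zeros((20, 24, 3), dtype=np.uint8)).shape)  # (18, 18, 3)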
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
a = NewType("""DataClass""", Any)
a = NewType("""DataClassType""", Any)
def UpperCamelCase_( __magic_name__ : List[Any] ):
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def UpperCamelCase_( __magic_name__ : list ):
"""simple docstring"""
_lowerCAmelCase :Tuple = {str(__magic_name__ ): choice for choice in choices}
return lambda __magic_name__ : str_to_choice.get(__magic_name__ , __magic_name__ )
def UpperCamelCase_( *, __magic_name__ : Union[str, List[str]] = None , __magic_name__ : str = None , __magic_name__ : Any = dataclasses.MISSING , __magic_name__ : Callable[[], Any] = dataclasses.MISSING , __magic_name__ : dict = None , **__magic_name__ : Optional[int] , ):
"""simple docstring"""
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
_lowerCAmelCase :Dict = {}
if aliases is not None:
_lowerCAmelCase :Union[str, Any] = aliases
if help is not None:
_lowerCAmelCase :List[str] = help
return dataclasses.field(metadata=__magic_name__ , default=__magic_name__ , default_factory=__magic_name__ , **__magic_name__ )
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Iterable[DataClassType]
def __init__( self: Dict , _UpperCAmelCase: Union[DataClassType, Iterable[DataClassType]] , **_UpperCAmelCase: List[str] ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
_lowerCAmelCase :List[Any] = ArgumentDefaultsHelpFormatter
super().__init__(**_UpperCAmelCase )
if dataclasses.is_dataclass(_UpperCAmelCase ):
_lowerCAmelCase :Optional[Any] = [dataclass_types]
_lowerCAmelCase :Optional[Any] = list(_UpperCAmelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(_UpperCAmelCase )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase: ArgumentParser , _UpperCAmelCase: dataclasses.Field ):
_lowerCAmelCase :int = f"""--{field.name}"""
_lowerCAmelCase :Dict = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , _UpperCAmelCase ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
_lowerCAmelCase :str = kwargs.pop('aliases' , [] )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :Tuple = [aliases]
_lowerCAmelCase :Dict = getattr(field.type , '__origin__' , field.type )
if origin_type is Union or (hasattr(_UpperCAmelCase , 'UnionType' ) and isinstance(_UpperCAmelCase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(_UpperCAmelCase ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
f""" Problem encountered in field '{field.name}'.""" )
if type(_UpperCAmelCase ) not in field.type.__args__:
# filter `str` in Union
_lowerCAmelCase :Dict = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
_lowerCAmelCase :Union[str, Any] = getattr(field.type , '__origin__' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
_lowerCAmelCase :List[str] = (
field.type.__args__[0] if isinstance(_UpperCAmelCase , field.type.__args__[1] ) else field.type.__args__[1]
)
_lowerCAmelCase :Any = getattr(field.type , '__origin__' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
_lowerCAmelCase :Tuple = {}
if origin_type is Literal or (isinstance(field.type , _UpperCAmelCase ) and issubclass(field.type , _UpperCAmelCase )):
if origin_type is Literal:
_lowerCAmelCase :Tuple = field.type.__args__
else:
_lowerCAmelCase :str = [x.value for x in field.type]
_lowerCAmelCase :Dict = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
_lowerCAmelCase :List[Any] = field.default
else:
_lowerCAmelCase :Tuple = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
_lowerCAmelCase :Optional[Any] = copy(_UpperCAmelCase )
# Hack because type=bool in argparse does not behave as we want.
_lowerCAmelCase :Optional[Any] = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
_lowerCAmelCase :List[str] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
_lowerCAmelCase :Optional[int] = default
# This tells argparse we accept 0 or 1 value after --field_name
_lowerCAmelCase :Any = '?'
# This is the value that will get picked if we do --field_name (without value)
_lowerCAmelCase :Any = True
elif isclass(_UpperCAmelCase ) and issubclass(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :Tuple = field.type.__args__[0]
_lowerCAmelCase :int = '+'
if field.default_factory is not dataclasses.MISSING:
_lowerCAmelCase :int = field.default_factory()
elif field.default is dataclasses.MISSING:
_lowerCAmelCase :int = True
else:
_lowerCAmelCase :str = field.type
if field.default is not dataclasses.MISSING:
_lowerCAmelCase :Any = field.default
elif field.default_factory is not dataclasses.MISSING:
_lowerCAmelCase :Dict = field.default_factory()
else:
_lowerCAmelCase :Dict = True
parser.add_argument(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
_lowerCAmelCase :Union[str, Any] = False
parser.add_argument(f"""--no_{field.name}""" , action='store_false' , dest=field.name , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: DataClassType ):
if hasattr(_UpperCAmelCase , '_argument_group_name' ):
_lowerCAmelCase :Any = self.add_argument_group(dtype._argument_group_name )
else:
_lowerCAmelCase :Tuple = self
try:
_lowerCAmelCase :Dict[str, type] = get_type_hints(_UpperCAmelCase )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(_UpperCAmelCase ):
_lowerCAmelCase :List[Any] = '.'.join(map(_UpperCAmelCase , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'line of `from __future__ import annotations` which opts in union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
'support Python versions that lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(_UpperCAmelCase ):
if not field.init:
continue
_lowerCAmelCase :List[Any] = type_hints[field.name]
self._parse_dataclass_field(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Optional[int]=None , _UpperCAmelCase: Optional[int]=False , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: Dict=None , _UpperCAmelCase: int=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
_lowerCAmelCase :Any = []
if args_filename:
args_files.append(Path(_UpperCAmelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
_lowerCAmelCase :Tuple = ArgumentParser()
args_file_parser.add_argument(_UpperCAmelCase , type=_UpperCAmelCase , action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
_lowerCAmelCase , _lowerCAmelCase :List[Any] = args_file_parser.parse_known_args(args=_UpperCAmelCase )
_lowerCAmelCase :Any = vars(_UpperCAmelCase ).get(args_file_flag.lstrip('-' ) , _UpperCAmelCase )
if cmd_args_file_paths:
args_files.extend([Path(_UpperCAmelCase ) for p in cmd_args_file_paths] )
_lowerCAmelCase :List[Any] = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
_lowerCAmelCase :int = file_args + args if args is not None else file_args + sys.argv[1:]
_lowerCAmelCase , _lowerCAmelCase :Dict = self.parse_known_args(args=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = []
for dtype in self.dataclass_types:
_lowerCAmelCase :List[str] = {f.name for f in dataclasses.fields(_UpperCAmelCase ) if f.init}
_lowerCAmelCase :Tuple = {k: v for k, v in vars(_UpperCAmelCase ).items() if k in keys}
for k in keys:
delattr(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :str = dtype(**_UpperCAmelCase )
outputs.append(_UpperCAmelCase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(_UpperCAmelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Dict[str, Any] , _UpperCAmelCase: bool = False ):
_lowerCAmelCase :Tuple = set(args.keys() )
_lowerCAmelCase :Union[str, Any] = []
for dtype in self.dataclass_types:
_lowerCAmelCase :str = {f.name for f in dataclasses.fields(_UpperCAmelCase ) if f.init}
_lowerCAmelCase :Any = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
_lowerCAmelCase :List[Any] = dtype(**_UpperCAmelCase )
outputs.append(_UpperCAmelCase )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(_UpperCAmelCase )}""" )
return tuple(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: str , _UpperCAmelCase: bool = False ):
with open(Path(_UpperCAmelCase ) , encoding='utf-8' ) as open_json_file:
_lowerCAmelCase :Any = json.loads(open_json_file.read() )
_lowerCAmelCase :List[str] = self.parse_dict(_UpperCAmelCase , allow_extra_keys=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: str , _UpperCAmelCase: bool = False ):
_lowerCAmelCase :List[Any] = self.parse_dict(yaml.safe_load(Path(_UpperCAmelCase ).read_text() ) , allow_extra_keys=_UpperCAmelCase )
return tuple(_UpperCAmelCase ) | 687 |
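Typical usage of the parser: declare a dataclass and let the CLI flags populate it. A small sketch (the entry point is the method defined above, publicly named parse_args_into_dataclasses in transformers):
from dataclasses import dataclass, field
from transformers import HfArgumentParser
@dataclass
class TrainArgs:
    learning_rate: float = field(default=3e-5, metadata={"help": "Peak learning rate."})
    do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
parser = HfArgumentParser(TrainArgs)
# A bool field acts as a bare flag (nargs='?', const=True); a `--no_*`
# complement is generated only when the default is True.
(train_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--do_train"])
print(train_args.learning_rate, train_args.do_train)  # 0.0001 True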
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
lowerCamelCase : Optional[datasets.Features] = None
class UpperCAmelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCamelCase : Any = PandasConfig
def SCREAMING_SNAKE_CASE__ ( self: int ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: List[str] ):
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowerCAmelCase :Dict = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_UpperCAmelCase , (str, list, tuple) ):
_lowerCAmelCase :Any = data_files
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :List[Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
_lowerCAmelCase :Any = []
for split_name, files in data_files.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :str = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :Union[str, Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={'files': files} ) )
return splits
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: pa.Table ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase :str = table_cast(_UpperCAmelCase , self.config.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: Dict ):
for i, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase ) ):
with open(_UpperCAmelCase , 'rb' ) as f:
_lowerCAmelCase :Optional[Any] = pa.Table.from_pandas(pd.read_pickle(_UpperCAmelCase ) )
yield i, self._cast_table(_UpperCAmelCase ) | 687 | 1 |
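From the user side this builder is reached through load_dataset with the 'pandas' packaged module; each data file is un-pickled into a DataFrame and cast to Arrow. A sketch, assuming pickled DataFrames on disk:
import pandas as pd
from datasets import load_dataset
pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle("train.pkl")
ds = load_dataset("pandas", data_files={"train": "train.pkl"})
print(ds["train"][0])  # {'text': 'a', 'label': 0}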
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : List[Any] = LEDConfig
lowerCamelCase : Tuple = {}
lowerCamelCase : int = 'gelu'
def __init__( self: List[str] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Any=13 , _UpperCAmelCase: str=7 , _UpperCAmelCase: List[str]=True , _UpperCAmelCase: List[str]=False , _UpperCAmelCase: int=99 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: Tuple=2 , _UpperCAmelCase: int=4 , _UpperCAmelCase: Dict=37 , _UpperCAmelCase: Optional[int]=0.1 , _UpperCAmelCase: int=0.1 , _UpperCAmelCase: int=20 , _UpperCAmelCase: Union[str, Any]=2 , _UpperCAmelCase: str=1 , _UpperCAmelCase: str=0 , _UpperCAmelCase: Dict=4 , ):
_lowerCAmelCase :int = parent
_lowerCAmelCase :int = batch_size
_lowerCAmelCase :int = seq_length
_lowerCAmelCase :Any = is_training
_lowerCAmelCase :Tuple = use_labels
_lowerCAmelCase :Optional[Any] = vocab_size
_lowerCAmelCase :Optional[int] = hidden_size
_lowerCAmelCase :Any = num_hidden_layers
_lowerCAmelCase :List[Any] = num_attention_heads
_lowerCAmelCase :str = intermediate_size
_lowerCAmelCase :Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase :List[Any] = attention_probs_dropout_prob
_lowerCAmelCase :Any = max_position_embeddings
_lowerCAmelCase :Union[str, Any] = eos_token_id
_lowerCAmelCase :List[Any] = pad_token_id
_lowerCAmelCase :int = bos_token_id
_lowerCAmelCase :List[Any] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_lowerCAmelCase :int = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_lowerCAmelCase :List[Any] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowerCAmelCase :Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCAmelCase :Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowerCAmelCase :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase :Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_lowerCAmelCase :Optional[int] = prepare_led_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = tf.concat(
[tf.zeros_like(_UpperCAmelCase )[:, :-1], tf.ones_like(_UpperCAmelCase )[:, -1:]] , axis=-1 , )
_lowerCAmelCase :Any = global_attention_mask
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Tuple = TFLEDModel(config=_UpperCAmelCase ).get_decoder()
_lowerCAmelCase :Any = inputs_dict['input_ids']
_lowerCAmelCase :List[str] = input_ids[:1, :]
_lowerCAmelCase :Optional[Any] = inputs_dict['attention_mask'][:1, :]
_lowerCAmelCase :Dict = 1
# first forward pass
_lowerCAmelCase :Optional[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase :int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowerCAmelCase :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCAmelCase :Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_lowerCAmelCase :Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_lowerCAmelCase :List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowerCAmelCase :Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
_lowerCAmelCase :List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowerCAmelCase :Optional[int] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowerCAmelCase :Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
_lowerCAmelCase :str = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1e-3 )
def UpperCamelCase_( __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any]=None , __magic_name__ : int=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=None , ):
"""simple docstring"""
if attention_mask is None:
_lowerCAmelCase :Tuple = tf.cast(tf.math.not_equal(__magic_name__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_lowerCAmelCase :List[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_lowerCAmelCase :Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowerCAmelCase :int = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : str = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
lowerCamelCase : Optional[Any] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
lowerCamelCase : List[str] = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCamelCase : int = True
lowerCamelCase : Tuple = False
lowerCamelCase : Optional[Any] = False
lowerCamelCase : Any = False
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :List[Any] = TFLEDModelTester(self )
_lowerCAmelCase :List[str] = ConfigTester(self , config_class=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase , _lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :Optional[Any] = tf.zeros_like(inputs_dict['attention_mask'] )
_lowerCAmelCase :Union[str, Any] = 2
_lowerCAmelCase :Any = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
_lowerCAmelCase :Tuple = True
_lowerCAmelCase :str = self.model_tester.seq_length
_lowerCAmelCase :List[Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_UpperCAmelCase: Dict ):
_lowerCAmelCase :str = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_UpperCAmelCase: Dict ):
_lowerCAmelCase :Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
_lowerCAmelCase :Union[str, Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_lowerCAmelCase :int = True
_lowerCAmelCase :Tuple = False
_lowerCAmelCase :Union[str, Any] = False
_lowerCAmelCase :int = model_class(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_lowerCAmelCase :Tuple = len(_UpperCAmelCase )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
if self.is_encoder_decoder:
_lowerCAmelCase :str = model_class(_UpperCAmelCase )
_lowerCAmelCase :str = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_decoder_attentions_output(_UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_lowerCAmelCase :Tuple = True
_lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase )
_lowerCAmelCase :Tuple = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
# Check attention is always last and order is fine
_lowerCAmelCase :str = True
_lowerCAmelCase :Tuple = True
_lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
pass
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
# TODO: Head-masking not yet implement
pass
def UpperCamelCase_( __magic_name__ : str ):
"""simple docstring"""
return tf.constant(__magic_name__ , dtype=tf.intaa )
a = 1E-4
@slow
@require_tf
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :List[str] = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
_lowerCAmelCase :Optional[Any] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_lowerCAmelCase :Optional[int] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_lowerCAmelCase :Any = prepare_led_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[int] = model(**_UpperCAmelCase )[0]
_lowerCAmelCase :int = (1, 1024, 768)
self.assertEqual(output.shape , _UpperCAmelCase )
# change to expected output here
_lowerCAmelCase :Dict = tf.convert_to_tensor(
[[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-3 )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Any = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
_lowerCAmelCase :List[Any] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_lowerCAmelCase :Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_lowerCAmelCase :Tuple = prepare_led_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Tuple = model(**_UpperCAmelCase )[0]
_lowerCAmelCase :Dict = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , _UpperCAmelCase )
# change to expected output here
_lowerCAmelCase :List[str] = tf.convert_to_tensor(
[[3_3.6_5_0_7, 6.4_5_7_2, 1_6.8_0_8_9], [5.8_7_3_9, -2.4_2_3_8, 1_1.2_9_0_2], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-3 , rtol=1e-3 ) | 687 |
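The recurring mask construction in these tests isolates LED's key input: tokens attend locally within attention_window, and global_attention_mask flags the few positions that also attend globally. A minimal sketch:
import tensorflow as tf
seq_length, num_global = 8, 2
# 1 marks tokens with global attention, 0 marks purely local ones.
global_attention_mask = tf.where(tf.range(seq_length)[None, :] < num_global, 1, 0)
print(global_attention_mask.numpy())  # [[1 1 0 0 0 0 0 0]]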
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
a = """"""
a = """"""
a = """"""
a = 1 # (0 is vertical, 1 is horizontal)
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = get_dataset(__magic_name__ , __magic_name__ )
print('Processing...' )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = update_image_and_anno(__magic_name__ , __magic_name__ , __magic_name__ )
for index, image in enumerate(__magic_name__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase :Optional[Any] = random_chars(32 )
_lowerCAmelCase :str = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
_lowerCAmelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , __magic_name__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(__magic_name__ )} with {file_name}""" )
_lowerCAmelCase :str = []
for anno in new_annos[index]:
_lowerCAmelCase :List[str] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(__magic_name__ )
with open(f"""/{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :int = []
_lowerCAmelCase :Union[str, Any] = []
for label_file in glob.glob(os.path.join(__magic_name__ , '*.txt' ) ):
_lowerCAmelCase :Optional[int] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(__magic_name__ ) as in_file:
_lowerCAmelCase :Union[str, Any] = in_file.readlines()
_lowerCAmelCase :List[Any] = os.path.join(__magic_name__ , f"""{label_name}.jpg""" )
_lowerCAmelCase :Tuple = []
for obj_list in obj_lists:
_lowerCAmelCase :Union[str, Any] = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__magic_name__ )
labels.append(__magic_name__ )
return img_paths, labels
def UpperCamelCase_( __magic_name__ : list , __magic_name__ : list , __magic_name__ : int = 1 ):
"""simple docstring"""
_lowerCAmelCase :str = []
_lowerCAmelCase :Any = []
_lowerCAmelCase :Optional[Any] = []
for idx in range(len(__magic_name__ ) ):
_lowerCAmelCase :Optional[int] = []
_lowerCAmelCase :Optional[Any] = img_list[idx]
path_list.append(__magic_name__ )
_lowerCAmelCase :List[str] = anno_list[idx]
_lowerCAmelCase :Optional[Any] = cva.imread(__magic_name__ )
if flip_type == 1:
_lowerCAmelCase :List[Any] = cva.flip(__magic_name__ , __magic_name__ )
for bbox in img_annos:
_lowerCAmelCase :List[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_lowerCAmelCase :List[str] = cva.flip(__magic_name__ , __magic_name__ )
for bbox in img_annos:
_lowerCAmelCase :List[str] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__magic_name__ )
new_imgs_list.append(__magic_name__ )
return new_imgs_list, new_annos_lists, path_list
def UpperCamelCase_( __magic_name__ : int = 32 ):
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase :str = ascii_lowercase + digits
return "".join(random.choice(__magic_name__ ) for _ in range(__magic_name__ ) )
if __name__ == "__main__":
main()
print("""DONE ✅""") | 687 | 1 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
a = logging.get_logger(__name__)
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: Tuple , *_UpperCAmelCase: Optional[int] , **_UpperCAmelCase: Optional[int] ):
warnings.warn(
'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use OwlViTImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) | 687 |
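The shim only forwards to the replacement class; new code should construct the image processor directly (the checkpoint name below is illustrative):
from transformers import OwlViTImageProcessor
image_processor = OwlViTImageProcessor.from_pretrained("google/owlvit-base-patch32")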
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
a = logging.get_logger(__name__)
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = nn.functional.normalize(__magic_name__ )
_lowerCAmelCase :List[str] = nn.functional.normalize(__magic_name__ )
return torch.mm(__magic_name__ , normalized_text_embeds.t() )
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : str = CLIPConfig
lowerCamelCase : Any = ['CLIPEncoderLayer']
def __init__( self: Optional[int] , _UpperCAmelCase: CLIPConfig ):
super().__init__(_UpperCAmelCase )
_lowerCAmelCase :Any = CLIPVisionModel(config.vision_config )
_lowerCAmelCase :Optional[int] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase )
_lowerCAmelCase :int = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :Any = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :str = nn.Parameter(torch.ones(17 ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = nn.Parameter(torch.ones(3 ) , requires_grad=_UpperCAmelCase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Dict ):
_lowerCAmelCase :str = self.vision_model(_UpperCAmelCase )[1] # pooled_output
_lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase :Optional[int] = cosine_distance(_UpperCAmelCase , self.special_care_embeds ).cpu().float().numpy()
_lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds ).cpu().float().numpy()
_lowerCAmelCase :str = []
_lowerCAmelCase :List[Any] = image_embeds.shape[0]
for i in range(_UpperCAmelCase ):
_lowerCAmelCase :Optional[Any] = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase :List[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
_lowerCAmelCase :List[Any] = special_cos_dist[i][concept_idx]
_lowerCAmelCase :Dict = self.special_care_embeds_weights[concept_idx].item()
_lowerCAmelCase :List[Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} )
_lowerCAmelCase :Any = 0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
_lowerCAmelCase :Union[str, Any] = cos_dist[i][concept_idx]
_lowerCAmelCase :str = self.concept_embeds_weights[concept_idx].item()
_lowerCAmelCase :str = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_UpperCAmelCase )
result.append(_UpperCAmelCase )
_lowerCAmelCase :Any = [len(res['bad_concepts'] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: torch.FloatTensor , _UpperCAmelCase: torch.FloatTensor ):
_lowerCAmelCase :Optional[int] = self.vision_model(_UpperCAmelCase )[1] # pooled_output
_lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase )
_lowerCAmelCase :Dict = cosine_distance(_UpperCAmelCase , self.special_care_embeds )
_lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase :Any = 0.0
_lowerCAmelCase :Union[str, Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
_lowerCAmelCase :Tuple = torch.any(special_scores > 0 , dim=1 )
_lowerCAmelCase :List[str] = special_care * 0.0_1
_lowerCAmelCase :Any = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
_lowerCAmelCase :Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
_lowerCAmelCase :List[str] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts | 687 | 1 |
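Despite its name, the cosine_distance helper at the top of this file returns a cosine similarity matrix: normalizing both inputs makes the matrix product equal pairwise cosine similarity. A standalone check:
import torch
import torch.nn as nn
image_embeds = torch.randn(2, 4)
text_embeds = torch.randn(3, 4)
sims = torch.mm(nn.functional.normalize(image_embeds), nn.functional.normalize(text_embeds).t())
ref = nn.functional.cosine_similarity(image_embeds[:, None, :], text_embeds[None, :, :], dim=-1)
print(sims.shape, torch.allclose(sims, ref, atol=1e-6))  # torch.Size([2, 3]) True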
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : int ):
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
_lowerCAmelCase :int = str(bin(__magic_name__ ) )
binary_number += "0" * shift_amount
return binary_number
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : int ):
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
_lowerCAmelCase :Union[str, Any] = str(bin(__magic_name__ ) )[2:]
if shift_amount >= len(__magic_name__ ):
return "0b0"
_lowerCAmelCase :str = binary_number[: len(__magic_name__ ) - shift_amount]
return "0b" + shifted_binary_number
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : int ):
"""simple docstring"""
if number >= 0: # Get binary representation of positive number
_lowerCAmelCase :str = '0' + str(bin(__magic_name__ ) ).strip('-' )[2:]
else: # Get binary (2's complement) representation of negative number
_lowerCAmelCase :Dict = len(bin(__magic_name__ )[3:] ) # Find 2's complement of number
_lowerCAmelCase :Union[str, Any] = bin(abs(__magic_name__ ) - (1 << binary_number_length) )[3:]
_lowerCAmelCase :List[Any] = (
'1' + '0' * (binary_number_length - len(__magic_name__ )) + binary_number
)
if shift_amount >= len(__magic_name__ ):
return "0b" + binary_number[0] * len(__magic_name__ )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__magic_name__ ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 |
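Traced values for the three shift helpers above, in definition order:
#   logical left shift of 10 by 2      -> '0b101000'  (10 << 2 == 40; zeros appended)
#   logical right shift of 11 by 2     -> '0b10'      (11 >> 2 == 2; top bits kept)
#   arithmetic right shift of -8 by 3  -> '0b11111'   (sign bit replicated; -1 in 5-bit two's complement)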
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a = 6_3_7_8_1_3_7.0
a = 6_3_5_6_7_5_2.3_1_4_2_4_5
a = 6_378_137
def UpperCamelCase_( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , __magic_name__ : float ):
"""simple docstring"""
_lowerCAmelCase :List[Any] = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
_lowerCAmelCase :Union[str, Any] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) )
_lowerCAmelCase :List[str] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
_lowerCAmelCase :int = haversine_distance(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
_lowerCAmelCase :str = (b_lata + b_lata) / 2
_lowerCAmelCase :Tuple = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
_lowerCAmelCase :str = (sin(__magic_name__ ) ** 2) * (cos(__magic_name__ ) ** 2)
_lowerCAmelCase :Optional[int] = cos(sigma / 2 ) ** 2
    _lowerCAmelCase :List[Any] = (sigma - sin(__magic_name__ )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
_lowerCAmelCase :Dict = (cos(__magic_name__ ) ** 2) * (sin(__magic_name__ ) ** 2)
_lowerCAmelCase :str = sin(sigma / 2 ) ** 2
_lowerCAmelCase :Union[str, Any] = (sigma + sin(__magic_name__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 | 1 |
def UpperCamelCase_( __magic_name__ : list ):
"""simple docstring"""
if len(__magic_name__ ) <= 1:
return [tuple(__magic_name__ )]
_lowerCAmelCase :List[str] = []
def generate(__magic_name__ : int , __magic_name__ : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , __magic_name__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
_lowerCAmelCase , _lowerCAmelCase :List[str] = arr[k - 1], arr[i]
else: # k is odd
_lowerCAmelCase , _lowerCAmelCase :Dict = arr[k - 1], arr[0]
generate(k - 1 , __magic_name__ )
generate(len(__magic_name__ ) , __magic_name__ )
return res
if __name__ == "__main__":
a = input("""Enter numbers separated by a comma:\n""").strip()
a = [int(item) for item in user_input.split(""",""")]
print(heaps(arr)) | 687 |
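# Illustrative sketch (annotation): Heap's algorithm above, de-obfuscated. It emits
# all n! permutations while changing only one swapped pair between successive steps.
def heaps_clean(arr: list) -> list:
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []
    def generate(k: int, arr: list) -> None:
        if k == 1:
            res.append(tuple(arr))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # even k: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # odd k: always swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)
    generate(len(arr), arr)
    return res
assert sorted(heaps_clean([1, 2, 3])) == [
    (1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)
]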
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Dict = 'encoder-decoder'
lowerCamelCase : Optional[Any] = True
def __init__( self: str , **_UpperCAmelCase: int ):
super().__init__(**_UpperCAmelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
_lowerCAmelCase :Optional[Any] = kwargs.pop('encoder' )
_lowerCAmelCase :Dict = encoder_config.pop('model_type' )
_lowerCAmelCase :str = kwargs.pop('decoder' )
_lowerCAmelCase :str = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
_lowerCAmelCase :str = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Tuple = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Any = True
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls: Tuple , _UpperCAmelCase: PretrainedConfig , _UpperCAmelCase: PretrainedConfig , **_UpperCAmelCase: str ):
logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
_lowerCAmelCase :Dict = True
_lowerCAmelCase :List[str] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowerCAmelCase :Optional[int] = self.encoder.to_dict()
_lowerCAmelCase :Union[str, Any] = self.decoder.to_dict()
_lowerCAmelCase :List[str] = self.__class__.model_type
return output | 687 | 1 |
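# Illustrative usage sketch (annotation) for the composite config above; in the public
# transformers API the classmethod is EncoderDecoderConfig.from_encoder_decoder_configs.
# The concrete sub-config choice (BERT) is just an example.
from transformers import BertConfig, EncoderDecoderConfig
encoder_config = BertConfig()  # any PretrainedConfig works
decoder_config = BertConfig()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder  # forced on by the classmethod
assert config.decoder.add_cross_attention
assert config.to_dict()["model_type"] == "encoder-decoder"  # nested encoder/decoder dicts inside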
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: List[str] , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :List[str] = data
def __iter__( self: Optional[int] ):
for element in self.data:
yield element
def UpperCamelCase_( __magic_name__ : Optional[int]=True ):
"""simple docstring"""
_lowerCAmelCase :int = Accelerator(even_batches=__magic_name__ )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def UpperCamelCase_( __magic_name__ : Accelerator , __magic_name__ : int , __magic_name__ : int , __magic_name__ : bool = False ):
"""simple docstring"""
if iterable:
_lowerCAmelCase :str = DummyIterableDataset(torch.as_tensor(range(__magic_name__ ) ) )
else:
_lowerCAmelCase :List[str] = TensorDataset(torch.as_tensor(range(__magic_name__ ) ) )
_lowerCAmelCase :List[str] = DataLoader(__magic_name__ , batch_size=__magic_name__ )
_lowerCAmelCase :Union[str, Any] = accelerator.prepare(__magic_name__ )
return dl
def UpperCamelCase_( __magic_name__ : Accelerator , __magic_name__ : int , __magic_name__ : int , __magic_name__ : List[int] , __magic_name__ : List[int] , ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = create_dataloader(accelerator=__magic_name__ , dataset_size=__magic_name__ , batch_size=__magic_name__ )
_lowerCAmelCase :Dict = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__magic_name__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__magic_name__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :List[Any] = create_accelerator(even_batches=__magic_name__ )
verify_dataloader_batch_sizes(
__magic_name__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__magic_name__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = create_accelerator(even_batches=__magic_name__ )
_lowerCAmelCase :Any = torch.nn.Linear(1 , 1 )
_lowerCAmelCase :Any = accelerator.prepare(__magic_name__ )
_lowerCAmelCase :List[Any] = create_dataloader(__magic_name__ , dataset_size=3 , batch_size=1 )
_lowerCAmelCase :List[Any] = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__magic_name__ ):
_lowerCAmelCase :Union[str, Any] = ddp_model(batch[0].float() )
_lowerCAmelCase :str = output.sum()
loss.backward()
batch_idxs.append(__magic_name__ )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def UpperCamelCase_( __magic_name__ : str ):
"""simple docstring"""
with warnings.catch_warnings(record=__magic_name__ ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __magic_name__ )
assert "only supported for multi-GPU" in str(w[-1].message )
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = True
_lowerCAmelCase :Dict = False
_lowerCAmelCase :Optional[int] = create_accelerator(even_batches=__magic_name__ )
_lowerCAmelCase :int = torch.nn.Linear(1 , 1 )
_lowerCAmelCase :Optional[Any] = accelerator.prepare(__magic_name__ )
_lowerCAmelCase :Optional[int] = create_dataloader(__magic_name__ , dataset_size=3 , batch_size=1 )
_lowerCAmelCase :Optional[Any] = create_dataloader(__magic_name__ , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__magic_name__ ):
_lowerCAmelCase :Optional[int] = train_dl.batch_sampler.even_batches
_lowerCAmelCase :Optional[Any] = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = True
_lowerCAmelCase :Tuple = False
_lowerCAmelCase :Any = create_accelerator(even_batches=__magic_name__ )
_lowerCAmelCase :Union[str, Any] = torch.nn.Linear(1 , 1 )
_lowerCAmelCase :str = accelerator.prepare(__magic_name__ )
create_dataloader(__magic_name__ , dataset_size=3 , batch_size=1 , iterable=__magic_name__ )
_lowerCAmelCase :Optional[Any] = create_dataloader(__magic_name__ , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('ignore' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__magic_name__ ):
_lowerCAmelCase :List[str] = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :str = create_accelerator()
_lowerCAmelCase :Dict = torch.nn.Linear(1 , 1 )
_lowerCAmelCase :int = accelerator.prepare(__magic_name__ )
create_dataloader(__magic_name__ , dataset_size=3 , batch_size=1 , iterable=__magic_name__ )
with warnings.catch_warnings(record=__magic_name__ ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__magic_name__ ):
pass
assert issubclass(w[-1].category , __magic_name__ )
assert "only supported for map-style datasets" in str(w[-1].message )
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :List[Any] = create_accelerator()
accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
test_default_ensures_even_batch_sizes()
accelerator.print('Run tests with even_batches disabled' )
test_can_disable_even_batches()
accelerator.print('Test joining uneven inputs' )
test_can_join_uneven_inputs()
accelerator.print('Test overriding even_batches when joining uneven inputs' )
test_join_can_override_even_batches()
accelerator.print('Test overriding even_batches for mixed dataloader types' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('Test join with non DDP distributed raises warning' )
_lowerCAmelCase :Any = accelerator.state.distributed_type
_lowerCAmelCase :Union[str, Any] = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__magic_name__ )
_lowerCAmelCase :List[Any] = original_state
if __name__ == "__main__":
main() | 687 |
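# Illustrative sketch (annotation) of the behaviour these tests pin down: with
# even_batches=True the sampler wraps samples from the start so every process sees
# the same batch shapes; with even_batches=False the trailing batches may differ.
# This is our own pure-Python simplification, not Accelerate's actual sampler logic.
def per_process_batch_sizes(dataset_size, batch_size, num_procs, even_batches):
    indices = list(range(dataset_size))
    if even_batches:
        while len(indices) % (batch_size * num_procs) != 0:  # pad by wrapping around
            indices.append(indices[len(indices) % dataset_size])
    shards = [indices[p::num_procs] for p in range(num_procs)]  # round-robin split
    return [
        [len(shard[i : i + batch_size]) for i in range(0, len(shard), batch_size)]
        for shard in shards
    ]
assert per_process_batch_sizes(3, 1, 2, even_batches=True) == [[1, 1], [1, 1]]
assert per_process_batch_sizes(3, 1, 2, even_batches=False) == [[1, 1], [1]]
assert per_process_batch_sizes(7, 2, 2, even_batches=False) == [[2, 2], [2, 1]]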
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ):
_lowerCAmelCase :Optional[int] = parent
_lowerCAmelCase :Dict = batch_size
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :Optional[Any] = patch_size
_lowerCAmelCase :List[Any] = num_channels
_lowerCAmelCase :Optional[int] = embed_dim
_lowerCAmelCase :List[str] = hidden_sizes
_lowerCAmelCase :Union[str, Any] = depths
_lowerCAmelCase :int = num_heads
_lowerCAmelCase :Any = window_size
_lowerCAmelCase :List[Any] = mlp_ratio
_lowerCAmelCase :Optional[int] = qkv_bias
_lowerCAmelCase :Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase :Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase :Dict = drop_path_rate
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :Tuple = use_absolute_embeddings
_lowerCAmelCase :Optional[int] = patch_norm
_lowerCAmelCase :Optional[Any] = layer_norm_eps
_lowerCAmelCase :Union[str, Any] = initializer_range
_lowerCAmelCase :List[str] = is_training
_lowerCAmelCase :str = scope
_lowerCAmelCase :Optional[int] = use_labels
_lowerCAmelCase :List[Any] = type_sequence_label_size
_lowerCAmelCase :Union[str, Any] = encoder_stride
_lowerCAmelCase :Optional[int] = out_features
_lowerCAmelCase :List[str] = out_indices
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase :Dict = None
if self.use_labels:
_lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase :str = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self: int ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ):
_lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCAmelCase :Optional[int] = None
_lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase :List[Any] = 1
_lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :int = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size
_lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase :Optional[int] = 1
_lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs
_lowerCAmelCase :List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : Optional[Any] = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Any = False
lowerCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = FocalNetModelTester(self )
_lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def SCREAMING_SNAKE_CASE__ ( self: str ):
pass
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Tuple = model_class(_UpperCAmelCase )
_lowerCAmelCase :Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase :int = [*signature.parameters.keys()]
_lowerCAmelCase :List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_lowerCAmelCase :List[Any] = outputs.hidden_states
_lowerCAmelCase :str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# FocalNet has a different seq_length
_lowerCAmelCase :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase :List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape
_lowerCAmelCase :Optional[int] = (
reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Dict = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :str = 3
_lowerCAmelCase :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase :int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :List[str] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Union[str, Any] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase :str = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.default_image_processor
_lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase :Dict = model(**_UpperCAmelCase )
# verify the logits
_lowerCAmelCase :str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase : str = FocalNetConfig
lowerCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Any = FocalNetModelTester(self ) | 687 | 1 |
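# Illustrative sketch (annotation) of the shape arithmetic the model tests above
# assert: patch embedding yields an (image_size / patch_size)^2 token grid, and each
# of the len(depths) - 1 downsampling stages merges 2x2 patches, dividing sequence
# length by 4 while doubling the embedding dim. Numbers mirror the tester defaults.
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
num_patches = (image_size // patch_size) ** 2  # 16 * 16 = 256
expected_seq_len = num_patches // (4 ** (len(depths) - 1))  # 256 // 16 = 16
expected_dim = embed_dim * 2 ** (len(depths) - 1)  # 16 * 4 = 64
assert (expected_seq_len, expected_dim) == (16, 64)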
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : float | Decimal , __magic_name__ : float = 10**-10 ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = a
while True:
_lowerCAmelCase :str = Decimal(__magic_name__ ) - (
Decimal(eval(__magic_name__ ) ) / Decimal(eval(str(diff(__magic_name__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(__magic_name__ ) ) < precision: # noqa: S307
return float(__magic_name__ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
# Find Square Root of 5
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''') | 687 |
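# Illustrative sketch (annotation): the same Newton-Raphson iteration with callables
# instead of string eval(), which avoids the S307 security caveat flagged above.
from math import cos, sin
def newton_raphson_fn(f, f_prime, x0: float, precision: float = 1e-10) -> float:
    x = x0
    while abs(f(x)) >= precision:
        x -= f(x) / f_prime(x)  # x_{n+1} = x_n - f(x_n) / f'(x_n)
    return x
assert abs(newton_raphson_fn(sin, cos, 2.0) - 3.141592653589793) < 1e-9  # root of sin near pi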
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
a = HfApi()
a = {}
# fmt: off
a = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
a = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
a = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
a = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
a = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
a = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
a = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
a = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
a = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
a = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
a = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
a = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
a = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
a = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
a = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
a = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
a = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith("""CompVis"""):
a = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
a = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
a = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
a = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
a = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''') | 687 | 1 |
def UpperCamelCase_( __magic_name__ : int = 4000000 ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = [0, 1]
_lowerCAmelCase :List[str] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
_lowerCAmelCase :Union[str, Any] = 0
for j in range(len(__magic_name__ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'''{solution() = }''') | 687 |
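# Illustrative sketch (annotation): the same Project Euler sum (even Fibonacci terms
# up to n) with two rolling variables instead of a list.
def even_fib_sum(n: int = 4_000_000) -> int:
    prev, curr, total = 0, 1, 0
    while curr <= n:
        if curr % 2 == 0:
            total += curr
        prev, curr = curr, prev + curr
    return total
assert even_fib_sum(10) == 2 + 8  # Fibonacci: 1 1 2 3 5 8
assert even_fib_sum(4_000_000) == 4613732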
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Optional[int] = 10
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :str = [1, 2, 3, 4]
_lowerCAmelCase :Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_lowerCAmelCase :Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
_lowerCAmelCase , _lowerCAmelCase :Optional[Any] = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Optional[int] = ''
_lowerCAmelCase , _lowerCAmelCase :str = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Optional[Any] = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
_lowerCAmelCase , _lowerCAmelCase :Optional[int] = process_story(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[int] = ['It was the best of times.']
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Union[str, Any] = torch.tensor([1, 2, 3, 4] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 0 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_lowerCAmelCase :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 23 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 1 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :List[str] = 101
_lowerCAmelCase :Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_lowerCAmelCase :int = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_lowerCAmelCase :List[str] = compute_token_type_ids(_UpperCAmelCase , _UpperCAmelCase )
np.testing.assert_array_equal(_UpperCAmelCase , _UpperCAmelCase ) | 687 | 1 |
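# Illustrative sketch (annotation) of the masking behaviour the tests above check:
# positions equal to the padding token get 0, everything else gets 1. A simplified
# stand-in for build_mask, not the library implementation itself.
import torch
def simple_build_mask(sequence: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    return (sequence != pad_token_id).long()
assert simple_build_mask(torch.tensor([1, 2, 3, 4, 23, 23, 23]), 23).tolist() == [1, 1, 1, 1, 0, 0, 0]
assert simple_build_mask(torch.tensor([8, 2, 3, 4, 1, 1, 1]), 1).tolist() == [1, 1, 1, 1, 0, 0, 0]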
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
a = direct_transformers_import(PATH_TO_TRANSFORMERS)
a = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
a = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
a = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def UpperCamelCase_( __magic_name__ : List[str] ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = None
# source code of `config_class`
_lowerCAmelCase :List[str] = inspect.getsource(__magic_name__ )
_lowerCAmelCase :Any = _re_checkpoint.findall(__magic_name__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('/' ):
_lowerCAmelCase :List[str] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
_lowerCAmelCase :Dict = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
_lowerCAmelCase :Optional[Any] = ckpt_name
break
return checkpoint
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Tuple = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
_lowerCAmelCase :Optional[int] = get_checkpoint_from_config_class(__magic_name__ )
_lowerCAmelCase :Any = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__magic_name__ )
if len(__magic_name__ ) > 0:
_lowerCAmelCase :int = '\n'.join(sorted(__magic_name__ ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints() | 687 |
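# Illustrative sketch (annotation) of the core check above in isolation: the regex
# pulls [name](link) pairs out of a docstring, and the name must match the link tail.
import re
_re_ckpt = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
doc = "Example: [bert-base-uncased](https://huggingface.co/bert-base-uncased)"
for name, link in _re_ckpt.findall(doc):
    assert link.rstrip("/") == f"https://huggingface.co/{name}"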
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
a = int(input("""Enter number: """).strip())
print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''') | 687 | 1 |
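# Illustrative sketch (annotation): the O(n) divisor scan above can be reduced to
# O(sqrt(n)) by accumulating divisor pairs (i, n // i) together.
def perfect_fast(number: int) -> bool:
    if number < 2:
        return False
    total, i = 1, 2  # 1 divides every number > 1
    while i * i <= number:
        if number % i == 0:
            total += i + number // i
            if i * i == number:  # do not double-count an exact square root
                total -= i
        i += 1
    return total == number
assert [n for n in range(2, 10_000) if perfect_fast(n)] == [6, 28, 496, 8128]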
from collections import deque
def UpperCamelCase_( __magic_name__ : List[Any] ):
"""simple docstring"""
_lowerCAmelCase :Dict = len(__magic_name__ )
_lowerCAmelCase :int = deque()
_lowerCAmelCase :List[Any] = [False for _ in range(__magic_name__ )]
_lowerCAmelCase :str = [-1 for _ in range(__magic_name__ )]
_lowerCAmelCase :List[str] = index_of[:]
def strong_connect(__magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : int ):
_lowerCAmelCase :int = index # the number when this node is seen
_lowerCAmelCase :Dict = index # lowest rank node reachable from here
index += 1
stack.append(__magic_name__ )
_lowerCAmelCase :Dict = True
for w in g[v]:
if index_of[w] == -1:
_lowerCAmelCase :Union[str, Any] = strong_connect(__magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Optional[int] = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
_lowerCAmelCase :Optional[Any] = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
_lowerCAmelCase :Union[str, Any] = []
_lowerCAmelCase :Optional[int] = stack.pop()
_lowerCAmelCase :Dict = False
component.append(__magic_name__ )
while w != v:
_lowerCAmelCase :Union[str, Any] = stack.pop()
_lowerCAmelCase :str = False
component.append(__magic_name__ )
components.append(__magic_name__ )
return index
_lowerCAmelCase :Union[str, Any] = []
for v in range(__magic_name__ ):
if index_of[v] == -1:
strong_connect(__magic_name__ , 0 , __magic_name__ )
return components
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : Any ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = [[] for _ in range(__magic_name__ )]
for u, v in edges:
g[u].append(__magic_name__ )
return g
if __name__ == "__main__":
# Test
a = 7
a = [0, 0, 1, 2, 3, 3, 4, 4, 6]
a = [1, 3, 2, 0, 1, 4, 5, 6, 5]
a = [(u, v) for u, v in zip(source, target)]
a = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g) | 687 |
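# Illustrative sketch (annotation): the same Tarjan SCC algorithm de-obfuscated;
# components come out in reverse topological order, matching the test graph above.
def tarjan_sketch(g):
    n = len(g)
    index_of, lowlink, on_stack = [-1] * n, [-1] * n, [False] * n
    stack, components, counter = [], [], [0]
    def strong_connect(v):
        index_of[v] = lowlink[v] = counter[0]  # discovery index doubles as lowlink
        counter[0] += 1
        stack.append(v)
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                strong_connect(w)
                lowlink[v] = min(lowlink[v], lowlink[w])
            elif on_stack[w]:
                lowlink[v] = min(lowlink[v], index_of[w])
        if lowlink[v] == index_of[v]:  # v is the root of a strongly connected component
            component = []
            while True:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
                if w == v:
                    break
            components.append(component)
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v)
    return components
assert tarjan_sketch([[1, 3], [2], [0], [1, 4], [5, 6], [], [5]]) == [[5], [6], [4], [3, 2, 1, 0]]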
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: List[Any] , _UpperCAmelCase: int , _UpperCAmelCase: MutableSequence[float] ):
if len(_UpperCAmelCase ) != degree + 1:
raise ValueError(
'The number of coefficients should be equal to the degree + 1.' )
_lowerCAmelCase :list[float] = list(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = degree
def __add__( self: str , _UpperCAmelCase: Polynomial ):
if self.degree > polynomial_a.degree:
_lowerCAmelCase :Any = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , _UpperCAmelCase )
else:
_lowerCAmelCase :List[Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , _UpperCAmelCase )
def __sub__( self: str , _UpperCAmelCase: Polynomial ):
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self: Union[str, Any] ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self: int , _UpperCAmelCase: Polynomial ):
_lowerCAmelCase :list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: int | float ):
_lowerCAmelCase :int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self: Union[str, Any] ):
_lowerCAmelCase :Dict = ''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
return polynomial
def __repr__( self: Optional[Any] ):
return self.__str__()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :list[float] = [0] * self.degree
for i in range(self.degree ):
_lowerCAmelCase :Tuple = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int | float = 0 ):
_lowerCAmelCase :list[float] = [0] * (self.degree + 2)
_lowerCAmelCase :str = constant
for i in range(self.degree + 1 ):
_lowerCAmelCase :List[str] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , _UpperCAmelCase )
def __eq__( self: List[Any] , _UpperCAmelCase: object ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self: Optional[Any] , _UpperCAmelCase: object ):
return not self.__eq__(_UpperCAmelCase ) | 687 | 1 |
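# Illustrative sketch (annotation) of the core Polynomial operations above using
# plain coefficient lists (lowest degree first): multiplication is a convolution
# and evaluation can use Horner's rule.
def poly_mul(p, q):
    out = [0] * (len(p) + len(q) - 1)
    for i, a_i in enumerate(p):
        for j, b_j in enumerate(q):
            out[i + j] += a_i * b_j  # x^i * x^j contributes to the x^(i+j) term
    return out
def poly_eval(p, x):
    result = 0
    for c in reversed(p):  # Horner: (((c_n * x + c_{n-1}) * x + ...) + c_0)
        result = result * x + c
    return result
assert poly_mul([1, 2], [3, 1]) == [3, 7, 2]  # (1 + 2x)(3 + x) = 3 + 7x + 2x^2
assert poly_eval([3, 7, 2], 2) == 25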
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Any = ComputeEnvironment.AMAZON_SAGEMAKER
lowerCamelCase : Tuple = True
lowerCamelCase : List[Any] = 'ml.p3.2xlarge'
lowerCamelCase : Any = 'accelerate_sagemaker_execution_role'
lowerCamelCase : Union[str, Any] = 'hf-sm'
lowerCamelCase : List[Any] = 'us-east-1'
lowerCamelCase : str = 1
lowerCamelCase : int = 'accelerate-sagemaker-1'
lowerCamelCase : Optional[Any] = '1.6'
lowerCamelCase : Union[str, Any] = '4.4'
lowerCamelCase : str = 'train.py'
lowerCamelCase : Any = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
lowerCamelCase : Tuple = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
# `_convert_nargs_to_dict` should coerce each training-script arg to the right Python type.
_lowerCAmelCase :Optional[Any] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['model_name_or_path'] , _UpperCAmelCase )
assert isinstance(converted_args['do_train'] , _UpperCAmelCase )
assert isinstance(converted_args['epochs'] , _UpperCAmelCase )
assert isinstance(converted_args['learning_rate'] , _UpperCAmelCase )
assert isinstance(converted_args['max_steps'] , _UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args ) | 687 |
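# Illustrative sketch (annotation) of the conversion the test above exercises: a flat
# list of CLI tokens becomes a dict, bare flags become True, and values get coerced
# to bool/int/float where possible. This is our own simplified re-implementation,
# not Accelerate's _convert_nargs_to_dict.
def nargs_to_dict(args):
    out, i = {}, 0
    while i < len(args):
        key = args[i].lstrip("-")
        nxt = args[i + 1] if i + 1 < len(args) else None
        if nxt is None or nxt.startswith("--"):
            out[key] = True  # bare flag, e.g. --do_train
            i += 1
            continue
        if nxt in ("True", "False"):
            out[key] = nxt == "True"
        else:
            try:
                out[key] = int(nxt)
            except ValueError:
                try:
                    out[key] = float(nxt)
                except ValueError:
                    out[key] = nxt
        i += 2
    return out
assert nargs_to_dict(["--model_name_or_path", "bert", "--do_train", "--epochs", "3", "--max_steps", "50.5"]) == {
    "model_name_or_path": "bert", "do_train": True, "epochs": 3, "max_steps": 50.5
}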
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 | 1 |
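# Illustrative sketch (annotation) of the pattern _LazyModule implements above: the
# TYPE_CHECKING branch gives static tools real imports, while at runtime a
# module-level __getattr__ (PEP 562) defers the import until first attribute access.
# Minimal stand-alone version with a stdlib module standing in for the submodules.
import importlib
_import_structure = {"json": ["dumps", "loads"]}  # attribute name -> owning module
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}
def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])  # imported on demand
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")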
a = """Alexander Joslin"""
import operator as op
from .stack import Stack
def UpperCamelCase_( __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Tuple = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
_lowerCAmelCase :Stack[int] = Stack()
_lowerCAmelCase :Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__magic_name__ ) )
elif i in operators:
# RULE 2
operator_stack.push(__magic_name__ )
elif i == ")":
# RULE 4
_lowerCAmelCase :Tuple = operator_stack.peek()
operator_stack.pop()
_lowerCAmelCase :Optional[int] = operand_stack.peek()
operand_stack.pop()
_lowerCAmelCase :Any = operand_stack.peek()
operand_stack.pop()
_lowerCAmelCase :Any = operators[opr](__magic_name__ , __magic_name__ )
operand_stack.push(__magic_name__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
a = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''') | 687 |
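# Illustrative sketch (annotation): the same two-stack evaluation de-obfuscated,
# with plain lists as stacks (single-digit operands, fully parenthesised input).
import operator as op
def two_stack_eval(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operands, opr_stack = [], []
    for ch in equation:
        if ch.isdigit():
            operands.append(int(ch))  # rule 1: operands go on one stack
        elif ch in operators:
            opr_stack.append(ch)  # rule 2: operators go on the other
        elif ch == ")":  # rule 4: a closing paren applies the top operator
            opr = opr_stack.pop()
            right, left = operands.pop(), operands.pop()
            operands.append(operators[opr](left, right))
    return operands[-1]  # rule 5: the lone remaining operand is the answer
assert two_stack_eval("(5 + ((4 * 2) * (2 + 3)))") == 45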
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : float | Decimal , __magic_name__ : float = 10**-10 ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = a
while True:
_lowerCAmelCase :str = Decimal(__magic_name__ ) - (
Decimal(eval(__magic_name__ ) ) / Decimal(eval(str(diff(__magic_name__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(__magic_name__ ) ) < precision: # noqa: S307
return float(__magic_name__ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
# Find Square Root of 5
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''') | 687 | 1 |
import requests
from bsa import BeautifulSoup
def UpperCamelCase_( __magic_name__ : str = "AAPL" ):
"""simple docstring"""
_lowerCAmelCase :int = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
_lowerCAmelCase :List[str] = BeautifulSoup(requests.get(__magic_name__ ).text , 'html.parser' )
_lowerCAmelCase :Union[str, Any] = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''') | 687 |
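# Note (annotation): scrapers like the row above are brittle - the Yahoo CSS class
# 'My(6px) Pos(r) smartphone_Mt(6px)' changes without notice, so treat the selector
# as a moving target. A slightly more defensive sketch (using the real bs4 package)
# checks the response and each lookup before dereferencing:
import requests
from bs4 import BeautifulSoup
def stock_price_safe(symbol: str = "AAPL"):
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    response = requests.get(url, timeout=10)
    if not response.ok:
        return None  # surface network/HTTP failure instead of raising downstream
    soup = BeautifulSoup(response.text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    span = div.find("span") if div is not None else None
    return span.text if span is not None else None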
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
a = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
def UpperCamelCase_( __magic_name__ : Dict ):
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any]=False ):
"""simple docstring"""
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
_lowerCAmelCase :Union[str, Any] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
_lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
_lowerCAmelCase :Optional[int] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : List[str]=None ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Tuple = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.norm.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.norm.bias"""]
_lowerCAmelCase :Dict = weight_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :str = bias_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[str] = weight_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Optional[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Tuple = weight_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[Any] = bias_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :int = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
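# Hedged note on the attention conversion above (not from the original file):
# the source checkpoints store q, k and v as one fused 1x1-conv weight of shape
# (3*C, C, 1, 1); chunk(3, dim=0) splits the three projections apart and the
# squeeze(-1).squeeze(-1) calls drop the trailing spatial dims so each becomes
# the (C, C) linear weight diffusers' attention blocks expect.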
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = torch.load(__magic_name__ , map_location='cpu' )
_lowerCAmelCase :List[Any] = {}
_lowerCAmelCase :List[str] = checkpoint['time_embed.0.weight']
_lowerCAmelCase :Tuple = checkpoint['time_embed.0.bias']
_lowerCAmelCase :Dict = checkpoint['time_embed.2.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_lowerCAmelCase :Union[str, Any] = checkpoint['label_emb.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.bias']
_lowerCAmelCase :List[Any] = unet_config['down_block_types']
_lowerCAmelCase :Any = unet_config['layers_per_block']
_lowerCAmelCase :List[Any] = unet_config['attention_head_dim']
_lowerCAmelCase :Tuple = unet_config['block_out_channels']
_lowerCAmelCase :List[str] = 1
_lowerCAmelCase :Optional[int] = channels_list[0]
for i, layer_type in enumerate(__magic_name__ ):
_lowerCAmelCase :Tuple = channels_list[i]
_lowerCAmelCase :Optional[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :int = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[Any] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :int = j == 0 and downsample_block_has_skip
_lowerCAmelCase :List[Any] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :List[str] = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Optional[int] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :List[str] = j == 0 and downsample_block_has_skip
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :Optional[int] = f"""down_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :str = f"""input_blocks.{current_layer}.1"""
_lowerCAmelCase :Optional[Any] = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Union[str, Any] = f"""down_blocks.{i}.downsamplers.0"""
_lowerCAmelCase :Tuple = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
_lowerCAmelCase :Dict = current_channels
# hardcoded the mid-block for now
_lowerCAmelCase :int = 'mid_block.resnets.0'
_lowerCAmelCase :Optional[Any] = 'middle_block.0'
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Optional[int] = 'mid_block.attentions.0'
_lowerCAmelCase :Optional[int] = 'middle_block.1'
_lowerCAmelCase :List[Any] = convert_attention(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Union[str, Any] = 'mid_block.resnets.1'
_lowerCAmelCase :Optional[int] = 'middle_block.2'
_lowerCAmelCase :int = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = 0
_lowerCAmelCase :str = unet_config['up_block_types']
for i, layer_type in enumerate(__magic_name__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Optional[Any] = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :Any = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Any = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer-1}.1"""
_lowerCAmelCase :Tuple = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Tuple = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[str] = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :str = f"""up_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :List[Any] = f"""output_blocks.{current_layer}.1"""
_lowerCAmelCase :int = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Optional[int] = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :int = f"""output_blocks.{current_layer-1}.2"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :str = checkpoint['out.0.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['out.0.bias']
_lowerCAmelCase :List[Any] = checkpoint['out.2.weight']
_lowerCAmelCase :Dict = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
a = parser.parse_args()
a = strabool(args.class_cond)
a = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
a = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
a = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
unet_config["num_class_embeds"] = None  # unconditional checkpoints carry no class-embedding table
a = con_pt_to_diffuser(args.unet_path, unet_config)
a = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
a = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
a = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
a = CMStochasticIterativeScheduler(**scheduler_config)
a = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path) | 687 | 1 |
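# Hedged usage sketch (the script and checkpoint filenames are illustrative,
# not from the original):
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-imagenet64 \
#       --class_cond True
#
# The checkpoint filename drives the branches above: "imagenet64"/"256"/"test"
# select the UNet config, and "cd" (distillation) vs "ct" (training) select the
# scheduler config.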
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = (DEISMultistepScheduler,)
lowerCamelCase : int = (('num_inference_steps', 25),)
def SCREAMING_SNAKE_CASE__ ( self: List[str] , **_UpperCAmelCase: Dict ):
_lowerCAmelCase :int = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
}
config.update(**_UpperCAmelCase )
return config
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: List[Any]=0 , **_UpperCAmelCase: Tuple ):
_lowerCAmelCase :Optional[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase :Optional[int] = kwargs.pop('num_inference_steps' , _UpperCAmelCase )
_lowerCAmelCase :Dict = self.dummy_sample
_lowerCAmelCase :List[str] = 0.1 * sample
_lowerCAmelCase :int = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase :List[str] = self.get_scheduler_config(**_UpperCAmelCase )
_lowerCAmelCase :List[str] = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
_lowerCAmelCase :List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
_lowerCAmelCase :Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase , _lowerCAmelCase :List[str] = sample, sample
for t in range(_UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase :str = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_lowerCAmelCase :Union[str, Any] = new_scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self: Any ):
pass
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int=0 , **_UpperCAmelCase: List[Any] ):
_lowerCAmelCase :List[str] = dict(self.forward_default_kwargs )
_lowerCAmelCase :int = kwargs.pop('num_inference_steps' , _UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = self.dummy_sample
_lowerCAmelCase :Optional[Any] = 0.1 * sample
_lowerCAmelCase :Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase :Any = self.get_scheduler_config()
_lowerCAmelCase :Union[str, Any] = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase :Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
_lowerCAmelCase :str = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase :Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase :Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_lowerCAmelCase :Any = new_scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Optional[int]=None , **_UpperCAmelCase: Any ):
if scheduler is None:
_lowerCAmelCase :List[str] = self.scheduler_classes[0]
_lowerCAmelCase :Optional[int] = self.get_scheduler_config(**_UpperCAmelCase )
_lowerCAmelCase :Any = scheduler_class(**_UpperCAmelCase )
_lowerCAmelCase :Tuple = 10
_lowerCAmelCase :List[Any] = self.dummy_model()
_lowerCAmelCase :int = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase :str = model(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
return sample
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Any = dict(self.forward_default_kwargs )
_lowerCAmelCase :Union[str, Any] = kwargs.pop('num_inference_steps' , _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase :int = self.get_scheduler_config()
_lowerCAmelCase :List[str] = scheduler_class(**_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = self.dummy_sample
_lowerCAmelCase :Union[str, Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase , 'set_timesteps' ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase , 'set_timesteps' ):
_lowerCAmelCase :Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase :Optional[int] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase :List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase :Tuple = scheduler.timesteps[5]
_lowerCAmelCase :Union[str, Any] = scheduler.timesteps[6]
_lowerCAmelCase :List[str] = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_lowerCAmelCase :Optional[int] = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self: int ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_lowerCAmelCase :Dict = DEISMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase :Dict = self.full_loop(scheduler=_UpperCAmelCase )
_lowerCAmelCase :Tuple = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
_lowerCAmelCase :Optional[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase :str = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase :List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase :Dict = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase :str = self.full_loop(scheduler=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
self.check_over_configs(thresholding=_UpperCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_UpperCAmelCase , prediction_type=_UpperCAmelCase , sample_max_value=_UpperCAmelCase , algorithm_type='deis' , solver_order=_UpperCAmelCase , solver_type=_UpperCAmelCase , )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_UpperCAmelCase , solver_type=_UpperCAmelCase , prediction_type=_UpperCAmelCase , algorithm_type=_UpperCAmelCase , )
_lowerCAmelCase :Any = self.full_loop(
solver_order=_UpperCAmelCase , solver_type=_UpperCAmelCase , prediction_type=_UpperCAmelCase , algorithm_type=_UpperCAmelCase , )
assert not torch.isnan(_UpperCAmelCase ).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
self.check_over_configs(lower_order_final=True )
self.check_over_configs(lower_order_final=False )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_UpperCAmelCase , time_step=0 )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :Tuple = self.full_loop()
_lowerCAmelCase :str = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Any = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase :Optional[int] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Optional[int] = self.scheduler_classes[0]
_lowerCAmelCase :List[Any] = self.get_scheduler_config(thresholding=_UpperCAmelCase , dynamic_thresholding_ratio=0 )
_lowerCAmelCase :Any = scheduler_class(**_UpperCAmelCase )
_lowerCAmelCase :List[str] = 10
_lowerCAmelCase :Any = self.dummy_model()
_lowerCAmelCase :Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Any = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
assert sample.dtype == torch.floataa | 687 |
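# Hedged sketch of the denoising loop these tests exercise (toy shapes and a
# dummy model output; not part of the test file):
#
#   import torch
#   from diffusers import DEISMultistepScheduler
#
#   scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)              # stand-in latent
#   for t in scheduler.timesteps:
#       model_output = torch.zeros_like(sample)   # stand-in for a real UNet call
#       sample = scheduler.step(model_output, t, sample).prev_sample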
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
a = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Optional[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
check_copies.DIFFUSERS_PATH = self.diffusers_dir  # point the copy checker at the temp tree
shutil.copy(
os.path.join(_UpperCAmelCase , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
check_copies.DIFFUSERS_PATH = 'src/diffusers'  # restore the default path before removing the temp tree
shutil.rmtree(self.diffusers_dir )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=None ):
_lowerCAmelCase :int = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_lowerCAmelCase :Dict = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_lowerCAmelCase :Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_lowerCAmelCase :List[str] = black.format_str(_UpperCAmelCase , mode=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(_UpperCAmelCase , 'w' , newline='\n' ) as f:
f.write(_UpperCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_UpperCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_UpperCAmelCase )
with open(_UpperCAmelCase , 'r' ) as f:
self.assertTrue(f.read() , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :List[str] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
# Base copy consistency
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , _UpperCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , )
# Copy consistency with a really long name
_lowerCAmelCase :Optional[int] = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , _UpperCAmelCase , _UpperCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , _UpperCAmelCase , overwrite_result=re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , ) | 687 | 1 |
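# Hedged illustration of the marker this suite enforces (class names here are
# illustrative):
#
#   # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
#   class TestSchedulerOutput(BaseOutput):
#       ...
#
# check_copies re-derives the body from the referenced original, applies the
# DDPM->Test rename, and reports (or, when overwriting is enabled, repairs)
# any drift between the copy and its source.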
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
a = logging.get_logger(__name__)
a = {
"""tensor(bool)""": np.bool_,
"""tensor(int8)""": np.inta,
"""tensor(uint8)""": np.uinta,
"""tensor(int16)""": np.intaa,
"""tensor(uint16)""": np.uintaa,
"""tensor(int32)""": np.intaa,
"""tensor(uint32)""": np.uintaa,
"""tensor(int64)""": np.intaa,
"""tensor(uint64)""": np.uintaa,
"""tensor(float16)""": np.floataa,
"""tensor(float)""": np.floataa,
"""tensor(double)""": np.floataa,
}
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: List[Any] , _UpperCAmelCase: Union[str, Any]=None , **_UpperCAmelCase: List[str] ):
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
_lowerCAmelCase :str = model
_lowerCAmelCase :Union[str, Any] = kwargs.get('model_save_dir' , _UpperCAmelCase )
_lowerCAmelCase :List[Any] = kwargs.get('latest_model_name' , _UpperCAmelCase )
def __call__( self: Optional[Any] , **_UpperCAmelCase: Dict ):
_lowerCAmelCase :Tuple = {k: np.array(_UpperCAmelCase ) for k, v in kwargs.items()}
return self.model.run(_UpperCAmelCase , _UpperCAmelCase )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase: Union[str, Path] , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Tuple=None ):
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
_lowerCAmelCase :Dict = 'CPUExecutionProvider'
return ort.InferenceSession(_UpperCAmelCase , providers=[provider] , sess_options=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: Union[str, Path] , _UpperCAmelCase: Optional[str] = None , **_UpperCAmelCase: Any ):
_lowerCAmelCase :Optional[int] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
_lowerCAmelCase :Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name )
_lowerCAmelCase :str = Path(_UpperCAmelCase ).joinpath(_UpperCAmelCase )
try:
shutil.copyfile(_UpperCAmelCase , _UpperCAmelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
_lowerCAmelCase :Optional[Any] = self.model_save_dir.joinpath(_UpperCAmelCase )
if src_path.exists():
_lowerCAmelCase :Any = Path(_UpperCAmelCase ).joinpath(_UpperCAmelCase )
try:
shutil.copyfile(_UpperCAmelCase , _UpperCAmelCase )
except shutil.SameFileError:
pass
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Union[str, os.PathLike] , **_UpperCAmelCase: Optional[Any] , ):
if os.path.isfile(_UpperCAmelCase ):
logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
# saving model weights/files
self._save_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls: Tuple , _UpperCAmelCase: Union[str, Path] , _UpperCAmelCase: Optional[Union[bool, str, None]] = None , _UpperCAmelCase: Optional[Union[str, None]] = None , _UpperCAmelCase: bool = False , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional["ort.SessionOptions"] = None , **_UpperCAmelCase: List[str] , ):
_lowerCAmelCase :List[Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_UpperCAmelCase ):
_lowerCAmelCase :int = OnnxRuntimeModel.load_model(
os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , provider=_UpperCAmelCase , sess_options=_UpperCAmelCase )
_lowerCAmelCase :str = Path(_UpperCAmelCase )
# load model from hub
else:
# download model
_lowerCAmelCase :Union[str, Any] = hf_hub_download(
repo_id=_UpperCAmelCase , filename=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , revision=_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , )
_lowerCAmelCase :List[str] = Path(_UpperCAmelCase ).parent
_lowerCAmelCase :Union[str, Any] = Path(_UpperCAmelCase ).name
_lowerCAmelCase :Any = OnnxRuntimeModel.load_model(_UpperCAmelCase , provider=_UpperCAmelCase , sess_options=_UpperCAmelCase )
return cls(model=_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls: Union[str, Any] , _UpperCAmelCase: Union[str, Path] , _UpperCAmelCase: bool = True , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , **_UpperCAmelCase: Any , ):
_lowerCAmelCase :List[str] = None
if len(str(_UpperCAmelCase ).split('@' ) ) == 2:
_lowerCAmelCase , _lowerCAmelCase :Any = model_id.split('@' )
return cls._from_pretrained(
model_id=_UpperCAmelCase , revision=_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , **_UpperCAmelCase , ) | 687 |
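# Hedged usage sketch (the repo id and input shape are illustrative):
#
#   import numpy as np
#   model = OnnxRuntimeModel.from_pretrained(
#       "some-org/some-onnx-model",    # assumed hub repo containing model.onnx
#       provider="CPUExecutionProvider",
#   )
#   outputs = model(input_ids=np.ones((1, 77), dtype=np.int64))
#
# __call__ coerces every kwarg to a numpy array and forwards the dict to
# onnxruntime's InferenceSession.run.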
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase : Optional[str] = field(
default='./' , metadata={'help': 'Save dir where the model repo is cloned and model updates are saved to.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCamelCase : Optional[int] = field(
default=1_00_00 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate for training.'} )
lowerCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate scheduler type.'} )
lowerCamelCase : Optional[int] = field(
default=7_50 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase : Optional[int] = field(
default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase : Optional[int] = field(default=5_00_00 , metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCamelCase : Optional[int] = field(
default=10_24 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase : Optional[int] = field(default=2_56 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase : Optional[int] = field(
default=2_00 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase : Optional[str] = field(
default='eval_results.json' , metadata={'help': 'Name of the JSON file where evaluation results are saved.'} )
lowerCamelCase : Optional[str] = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot-clean' , metadata={'help': 'Folder to save the processed dataset.'} )
lowerCamelCase : Optional[int] = field(
default=10_00_00 , metadata={'help': 'Number of examples to save per JSON output file.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[float] = field(
default=10_00 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1_00 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase : Optional[float] = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[int] = field(default=20_00_00 , metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCamelCase : Optional[int] = field(
default=3_27_68 , metadata={'help': 'Vocabulary size of the new tokenizer.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase : Optional[str] = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for pretokenizing the dataset.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push the initialized model to the hub.'} )
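# Hedged usage sketch (assumes transformers' HfArgumentParser, which the
# upstream codeparrot scripts pair these dataclasses with; the field names
# below are the upstream ones, not the placeholder identifiers shown above):
#
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(TrainingArguments)   # i.e. the first dataclass above
#   args = parser.parse_args()
#   print(args.learning_rate, args.train_batch_size)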
from ...processing_utils import ProcessorMixin
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Tuple = ['image_processor', 'feature_extractor']
lowerCamelCase : str = 'TvltImageProcessor'
lowerCamelCase : Any = 'TvltFeatureExtractor'
def __init__( self: Optional[int] , _UpperCAmelCase: Dict , _UpperCAmelCase: Tuple ):
super().__init__(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
_lowerCAmelCase :str = image_processor
_lowerCAmelCase :Union[str, Any] = feature_extractor
def __call__( self: Dict , _UpperCAmelCase: List[Any]=None , _UpperCAmelCase: Dict=None , _UpperCAmelCase: Optional[int]=None , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: List[Any]=False , _UpperCAmelCase: Union[str, Any]=False , *_UpperCAmelCase: Optional[Any] , **_UpperCAmelCase: Dict , ):
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.' )
_lowerCAmelCase :str = None
if images is not None:
_lowerCAmelCase :List[str] = self.image_processor(_UpperCAmelCase , mask_pixel=_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
if images_mixed is not None:
_lowerCAmelCase :Union[str, Any] = self.image_processor(_UpperCAmelCase , is_mixed=_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
if audio is not None:
_lowerCAmelCase :List[str] = self.feature_extractor(
_UpperCAmelCase , *_UpperCAmelCase , sampling_rate=_UpperCAmelCase , mask_audio=_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = {}
if audio is not None:
output_dict.update(_UpperCAmelCase )
if images is not None:
output_dict.update(_UpperCAmelCase )
if images_mixed_dict is not None:
output_dict.update(_UpperCAmelCase )
return output_dict
@property
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Dict = self.image_processor.model_input_names
_lowerCAmelCase :Tuple = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) ) | 687 |
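# Hedged usage sketch (inputs are illustrative): the processor fans image and
# audio inputs out to its two sub-processors and merges their output dicts.
#
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)
#   # batch holds the union of the TvltImageProcessor and TvltFeatureExtractor
#   # outputs; passing neither images nor audio raises a ValueError.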
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :List[str] = 'ylacombe/bark-small'
_lowerCAmelCase :int = tempfile.mkdtemp()
_lowerCAmelCase :List[str] = 'en_speaker_1'
_lowerCAmelCase :Union[str, Any] = 'This is a test string'
_lowerCAmelCase :List[Any] = 'speaker_embeddings_path.json'
_lowerCAmelCase :str = 'speaker_embeddings'
def SCREAMING_SNAKE_CASE__ ( self: str , **_UpperCAmelCase: Optional[Any] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :List[Any] = self.get_tokenizer()
_lowerCAmelCase :List[str] = BarkProcessor(tokenizer=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase :Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCAmelCase :Any = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase :List[Any] = 35
_lowerCAmelCase :Optional[int] = 2
_lowerCAmelCase :Dict = 8
_lowerCAmelCase :Dict = {
'semantic_prompt': np.ones(_UpperCAmelCase ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase :int = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase :Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Tuple = self.get_tokenizer()
_lowerCAmelCase :Union[str, Any] = BarkProcessor(tokenizer=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = processor(text=self.input_string )
_lowerCAmelCase :List[str] = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() ) | 687 | 1 |
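# Hedged note (array shapes mirror the dummy preset built above, not real Bark
# presets): a voice preset is simply a dict of three arrays:
#   semantic_prompt: (seq_len,)
#   coarse_prompt:   (nb_codebooks_coarse, seq_len)
#   fine_prompt:     (nb_codebooks_total, seq_len)
# BarkProcessor accepts it as an in-memory dict, a local .npz path, or a
# preset name resolved from the hub, the three cases the test above exercises.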
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
a = None
a = logging.get_logger(__name__)
a = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
a = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
a = {
"""google/bigbird-roberta-base""": 4_096,
"""google/bigbird-roberta-large""": 4_096,
"""google/bigbird-base-trivia-itc""": 4_096,
}
a = """▁"""
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Any = VOCAB_FILES_NAMES
lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : List[str] = BigBirdTokenizer
lowerCamelCase : Tuple = ['input_ids', 'attention_mask']
lowerCamelCase : List[int] = []
def __init__( self: int , _UpperCAmelCase: int=None , _UpperCAmelCase: Union[str, Any]=None , _UpperCAmelCase: Dict="<unk>" , _UpperCAmelCase: Any="<s>" , _UpperCAmelCase: Optional[Any]="</s>" , _UpperCAmelCase: List[Any]="<pad>" , _UpperCAmelCase: Union[str, Any]="[SEP]" , _UpperCAmelCase: str="[MASK]" , _UpperCAmelCase: Optional[int]="[CLS]" , **_UpperCAmelCase: Union[str, Any] , ):
_lowerCAmelCase :int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
_lowerCAmelCase :Union[str, Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
_lowerCAmelCase :List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
_lowerCAmelCase :str = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
_lowerCAmelCase :int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
_lowerCAmelCase :int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase :Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , )
_lowerCAmelCase :str = vocab_file
_lowerCAmelCase :int = bool(self.vocab_file )  # can_save_slow_tokenizer
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[int] , _UpperCAmelCase: Optional[List[int]] = None ):
_lowerCAmelCase :Union[str, Any] = [self.sep_token_id]
_lowerCAmelCase :Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: List[int] , _UpperCAmelCase: Optional[List[int]] = None , _UpperCAmelCase: bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1]
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: List[int] , _UpperCAmelCase: Optional[List[int]] = None ):
_lowerCAmelCase :Union[str, Any] = [self.sep_token_id]
_lowerCAmelCase :List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: str , _UpperCAmelCase: Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase :Optional[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
return (out_vocab_file,) | 687 |
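# Hedged usage sketch (the checkpoint id is one of the real ids listed above;
# upstream exposes this fast tokenizer as BigBirdTokenizerFast):
#
#   tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#   enc = tok("BigBird handles long sequences.")
#   # input_ids come out wrapped as [CLS] ... [SEP], matching
#   # build_inputs_with_special_tokens above.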
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : int = 'bert'
def __init__( self: Optional[Any] , _UpperCAmelCase: Tuple=3_0522 , _UpperCAmelCase: int=768 , _UpperCAmelCase: Union[str, Any]=12 , _UpperCAmelCase: Dict=12 , _UpperCAmelCase: List[Any]=3072 , _UpperCAmelCase: List[Any]="gelu" , _UpperCAmelCase: Union[str, Any]=0.1 , _UpperCAmelCase: Dict=0.1 , _UpperCAmelCase: List[Any]=512 , _UpperCAmelCase: Optional[Any]=2 , _UpperCAmelCase: Optional[int]=0.0_2 , _UpperCAmelCase: Any=1e-1_2 , _UpperCAmelCase: Optional[Any]=0 , _UpperCAmelCase: Union[str, Any]="absolute" , _UpperCAmelCase: Dict=True , _UpperCAmelCase: Optional[Any]=None , **_UpperCAmelCase: Optional[int] , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :List[Any] = vocab_size
_lowerCAmelCase :Tuple = hidden_size
_lowerCAmelCase :Dict = num_hidden_layers
_lowerCAmelCase :Optional[Any] = num_attention_heads
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :int = intermediate_size
_lowerCAmelCase :Tuple = hidden_dropout_prob
_lowerCAmelCase :Tuple = attention_probs_dropout_prob
_lowerCAmelCase :List[Any] = max_position_embeddings
_lowerCAmelCase :Dict = type_vocab_size
_lowerCAmelCase :Any = initializer_range
_lowerCAmelCase :int = layer_norm_eps
_lowerCAmelCase :List[Any] = position_embedding_type
_lowerCAmelCase :int = use_cache
_lowerCAmelCase :Union[str, Any] = classifier_dropout
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
if self.task == "multiple-choice":
_lowerCAmelCase :List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase :Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] ) | 687 | 1 |
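# Hedged usage sketch: the defaults above reproduce the bert-base architecture.
#
#   config = BertConfig()        # the config class defined above
#   config.vocab_size            # -> 30522
#   config.hidden_size           # -> 768
#   config.num_hidden_layers     # -> 12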
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
# count_trainable_parameters in the upstream module (called from the logging callback below)
def UpperCamelCase_( model : List[Any] ):
"""simple docstring"""
model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
params = sum([np.prod(p.size() ) for p in model_parameters] )
return params
a = logging.getLogger(__name__)
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : Dict ):
"""simple docstring"""
if metric == "rouge2":
_lowerCAmelCase :List[Any] = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
_lowerCAmelCase :Optional[int] = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
_lowerCAmelCase :Union[str, Any] = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
_lowerCAmelCase :Optional[Any] = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
f"""seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this"""
' function.' )
_lowerCAmelCase :int = ModelCheckpoint(
dirpath=__magic_name__ , filename=__magic_name__ , monitor=f"""val_{metric}""" , mode='min' if 'loss' in metric else 'max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def UpperCamelCase_( __magic_name__ : Dict , __magic_name__ : Optional[Any] ):
"""simple docstring"""
return EarlyStopping(
monitor=f"""val_{metric}""" , mode='min' if 'loss' in metric else 'max' , patience=__magic_name__ , verbose=__magic_name__ , )
class UpperCAmelCase_ (pl.Callback ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Dict , _UpperCAmelCase: str ):
_lowerCAmelCase :Any = {f"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_UpperCAmelCase )
@rank_zero_only
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: pl.Trainer , _UpperCAmelCase: pl.LightningModule , _UpperCAmelCase: str , _UpperCAmelCase: Optional[int]=True ):
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
_lowerCAmelCase :str = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
_lowerCAmelCase :List[str] = Path(pl_module.hparams.output_dir )
if type_path == "test":
_lowerCAmelCase :int = od / 'test_results.txt'
_lowerCAmelCase :int = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_lowerCAmelCase :Optional[int] = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
_lowerCAmelCase :str = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=_UpperCAmelCase )
generations_file.parent.mkdir(exist_ok=_UpperCAmelCase )
with open(_UpperCAmelCase , 'a+' ) as writer:
for key in sorted(_UpperCAmelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
_lowerCAmelCase :List[Any] = metrics[key]
if isinstance(_UpperCAmelCase , torch.Tensor ):
_lowerCAmelCase :int = val.item()
_lowerCAmelCase :Union[str, Any] = f"""{key}: {val:.6f}\n"""
writer.write(_UpperCAmelCase )
if not save_generations:
return
if "preds" in metrics:
_lowerCAmelCase :Optional[int] = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(_UpperCAmelCase )
@rank_zero_only
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any ):
try:
_lowerCAmelCase :Optional[Any] = pl_module.model.model.num_parameters()
except AttributeError:
_lowerCAmelCase :Tuple = pl_module.model.num_parameters()
_lowerCAmelCase :Dict = count_trainable_parameters(_UpperCAmelCase )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} )
@rank_zero_only
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: pl.Trainer , _UpperCAmelCase: pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_UpperCAmelCase , _UpperCAmelCase , 'test' )
@rank_zero_only
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: pl.Trainer , _UpperCAmelCase: Dict ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 687 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ):
"""simple docstring"""
if isinstance(__magic_name__ , torch.Tensor ):
return image
elif isinstance(__magic_name__ , PIL.Image.Image ):
_lowerCAmelCase :Tuple = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCAmelCase :List[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowerCAmelCase :Optional[Any] = np.concatenate(__magic_name__ , axis=0 )
_lowerCAmelCase :Any = np.array(__magic_name__ ).astype(np.floataa ) / 255.0
_lowerCAmelCase :Optional[int] = image.transpose(0 , 3 , 1 , 2 )
_lowerCAmelCase :int = 2.0 * image - 1.0
_lowerCAmelCase :Optional[int] = torch.from_numpy(__magic_name__ )
elif isinstance(image[0] , torch.Tensor ):
_lowerCAmelCase :str = torch.cat(__magic_name__ , dim=0 )
return image
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : int=0.9995 ):
"""simple docstring"""
if not isinstance(__magic_name__ , np.ndarray ):
_lowerCAmelCase :Tuple = True
_lowerCAmelCase :str = va.device
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :Any = np.sum(va * va / (np.linalg.norm(__magic_name__ ) * np.linalg.norm(__magic_name__ )) )
if np.abs(__magic_name__ ) > DOT_THRESHOLD:
_lowerCAmelCase :Optional[Any] = (1 - t) * va + t * va
else:
_lowerCAmelCase :int = np.arccos(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = np.sin(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = theta_a * t
_lowerCAmelCase :str = np.sin(__magic_name__ )
_lowerCAmelCase :Any = np.sin(theta_a - theta_t ) / sin_theta_a
_lowerCAmelCase :Optional[Any] = sin_theta_t / sin_theta_a
_lowerCAmelCase :List[Any] = sa * va + sa * va
if inputs_are_torch:
_lowerCAmelCase :int = torch.from_numpy(__magic_name__ ).to(__magic_name__ )
return va
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Any = F.normalize(__magic_name__ , dim=-1 )
_lowerCAmelCase :str = F.normalize(__magic_name__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
for param in model.parameters():
_lowerCAmelCase :List[str] = value
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: Any , _UpperCAmelCase: AutoencoderKL , _UpperCAmelCase: CLIPTextModel , _UpperCAmelCase: CLIPModel , _UpperCAmelCase: CLIPTokenizer , _UpperCAmelCase: UNetaDConditionModel , _UpperCAmelCase: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _UpperCAmelCase: CLIPFeatureExtractor , _UpperCAmelCase: str=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Union[str, Any]=None , ):
super().__init__()
self.register_modules(
vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , clip_model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , coca_model=_UpperCAmelCase , coca_tokenizer=_UpperCAmelCase , coca_transform=_UpperCAmelCase , )
_lowerCAmelCase :int = (
feature_extractor.size
if isinstance(feature_extractor.size , _UpperCAmelCase )
else feature_extractor.size['shortest_edge']
)
_lowerCAmelCase :Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _UpperCAmelCase )
set_requires_grad(self.clip_model , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase :Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
self.enable_attention_slicing(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Dict ):
# get the original timestep using init_timestep
_lowerCAmelCase :Optional[Any] = min(int(num_inference_steps * strength ) , _UpperCAmelCase )
_lowerCAmelCase :List[str] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase :Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Union[str, Any]=None ):
if not isinstance(_UpperCAmelCase , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(_UpperCAmelCase )}""" )
_lowerCAmelCase :Union[str, Any] = image.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :List[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCAmelCase )
]
_lowerCAmelCase :List[str] = torch.cat(_UpperCAmelCase , dim=0 )
else:
_lowerCAmelCase :List[str] = self.vae.encode(_UpperCAmelCase ).latent_dist.sample(_UpperCAmelCase )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase :List[Any] = 0.1_8_2_1_5 * init_latents
_lowerCAmelCase :List[Any] = init_latents.repeat_interleave(_UpperCAmelCase , dim=0 )
_lowerCAmelCase :Dict = randn_tensor(init_latents.shape , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
# get latents
_lowerCAmelCase :Dict = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :List[str] = init_latents
return latents
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, Any] ):
_lowerCAmelCase :Optional[int] = self.coca_transform(_UpperCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCAmelCase :Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_lowerCAmelCase :int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] ):
_lowerCAmelCase :Optional[int] = self.feature_extractor.preprocess(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
_lowerCAmelCase :List[str] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Dict = image_embeddings_clip.repeat_interleave(_UpperCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , _UpperCAmelCase: Dict , _UpperCAmelCase: str , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , ):
_lowerCAmelCase :Dict = latents.detach().requires_grad_()
_lowerCAmelCase :Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowerCAmelCase :int = self.scheduler.alphas_cumprod[timestep]
_lowerCAmelCase :Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase :str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCAmelCase :Optional[Any] = torch.sqrt(_UpperCAmelCase )
_lowerCAmelCase :List[str] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Dict = self.scheduler.sigmas[index]
_lowerCAmelCase :Optional[Any] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase :Tuple = 1 / 0.1_8_2_1_5 * sample
_lowerCAmelCase :Optional[Any] = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Tuple = transforms.Resize(self.feature_extractor_size )(_UpperCAmelCase )
_lowerCAmelCase :Tuple = self.normalize(_UpperCAmelCase ).to(latents.dtype )
_lowerCAmelCase :List[Any] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Tuple = spherical_dist_loss(_UpperCAmelCase , _UpperCAmelCase ).mean() * clip_guidance_scale
_lowerCAmelCase :str = -torch.autograd.grad(_UpperCAmelCase , _UpperCAmelCase )[0]
if isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Union[str, Any] = latents.detach() + grads * (sigma**2)
_lowerCAmelCase :Dict = noise_pred_original
else:
_lowerCAmelCase :Optional[int] = noise_pred_original - torch.sqrt(_UpperCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self: Optional[int] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: float = 0.6 , _UpperCAmelCase: Optional[int] = 50 , _UpperCAmelCase: Optional[float] = 7.5 , _UpperCAmelCase: Optional[int] = 1 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Optional[float] = 100 , _UpperCAmelCase: Optional[torch.Generator] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , _UpperCAmelCase: float = 0.8 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(_UpperCAmelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(_UpperCAmelCase , torch.Generator ) and batch_size > 1:
_lowerCAmelCase :int = [generator] + [None] * (batch_size - 1)
_lowerCAmelCase :List[Any] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowerCAmelCase :Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowerCAmelCase :List[str] = ', '.join(_UpperCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :List[Any] = self.get_image_description(_UpperCAmelCase )
if style_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :Any = self.get_image_description(_UpperCAmelCase )
# get prompt text embeddings for content and style
_lowerCAmelCase :Any = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :int = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :List[str] = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase :str = text_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# set timesteps
_lowerCAmelCase :Any = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowerCAmelCase :Dict = {}
if accepts_offset:
_lowerCAmelCase :Optional[int] = 1
self.scheduler.set_timesteps(_UpperCAmelCase , **_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , self.device )
_lowerCAmelCase :int = timesteps[:1].repeat(_UpperCAmelCase )
# Preprocess image
_lowerCAmelCase :Dict = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :int = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :Any = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :str = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if clip_guidance_scale > 0:
_lowerCAmelCase :Optional[Any] = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Any = slerp(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase :int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase :Optional[int] = content_text_input.input_ids.shape[-1]
_lowerCAmelCase :Union[str, Any] = self.tokenizer([''] , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='pt' )
_lowerCAmelCase :Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCAmelCase :Optional[int] = uncond_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase :int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase :Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase :Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCAmelCase :Any = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to(
self.device )
else:
_lowerCAmelCase :List[Any] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCAmelCase :int = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase :Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase :Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase :Any = {}
if accepts_eta:
_lowerCAmelCase :Any = eta
# check if the scheduler accepts generator
_lowerCAmelCase :List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowerCAmelCase :List[Any] = generator
with self.progress_bar(total=_UpperCAmelCase ):
for i, t in enumerate(_UpperCAmelCase ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase :Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase :Tuple = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase :List[str] = noise_pred.chunk(2 )
_lowerCAmelCase :Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCAmelCase :List[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.cond_fn(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase :str = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase :str = 1 / 0.1_8_2_1_5 * latents
_lowerCAmelCase :Any = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[str] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase :List[Any] = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase ) | 687 | 1 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
a = HfApi()
a = {}
# fmt: off
a = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
a = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
a = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
a = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
a = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
a = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
a = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
a = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
a = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
a = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
a = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
a = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
a = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
a = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
a = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
a = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
a = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith("""CompVis"""):
a = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
a = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
a = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
a = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
a = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''') | 687 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = list(__magic_name__ )
_lowerCAmelCase :Dict = list(__magic_name__ )
_lowerCAmelCase :Any = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != lista[i]:
count += 1
_lowerCAmelCase :Union[str, Any] = '_'
if count > 1:
return False
else:
return "".join(__magic_name__ )
def UpperCamelCase_( __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :int = []
while True:
_lowerCAmelCase :str = ['$'] * len(__magic_name__ )
_lowerCAmelCase :Optional[int] = []
for i in range(len(__magic_name__ ) ):
for j in range(i + 1 , len(__magic_name__ ) ):
_lowerCAmelCase :int = compare_string(binary[i] , binary[j] )
if k is False:
_lowerCAmelCase :str = '*'
_lowerCAmelCase :Union[str, Any] = '*'
temp.append('X' )
for i in range(len(__magic_name__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__magic_name__ ) == 0:
return pi
_lowerCAmelCase :Any = list(set(__magic_name__ ) )
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : Sequence[float] ):
"""simple docstring"""
_lowerCAmelCase :str = []
for minterm in minterms:
_lowerCAmelCase :Any = ''
for _ in range(__magic_name__ ):
_lowerCAmelCase :Tuple = str(minterm % 2 ) + string
minterm //= 2
temp.append(__magic_name__ )
return temp
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = list(__magic_name__ )
_lowerCAmelCase :List[Any] = list(__magic_name__ )
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCamelCase_( __magic_name__ : list[list[int]] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = []
_lowerCAmelCase :List[str] = [0] * len(__magic_name__ )
for i in range(len(chart[0] ) ):
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Optional[Any] = -1
for j in range(len(__magic_name__ ) ):
if chart[j][i] == 1:
count += 1
_lowerCAmelCase :List[Any] = j
if count == 1:
_lowerCAmelCase :Dict = 1
for i in range(len(__magic_name__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__magic_name__ ) ):
_lowerCAmelCase :Dict = 0
temp.append(prime_implicants[i] )
while True:
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Any = -1
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = chart[i].count(1 )
if count_n > max_n:
_lowerCAmelCase :Optional[Any] = count_n
_lowerCAmelCase :Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = 0
def UpperCamelCase_( __magic_name__ : list[str] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = [[0 for x in range(len(__magic_name__ ) )] for x in range(len(__magic_name__ ) )]
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :Tuple = prime_implicants[i].count('_' )
for j in range(len(__magic_name__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , __magic_name__ ):
_lowerCAmelCase :str = 1
return chart
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Tuple = int(input('Enter the no. of variables\n' ) )
_lowerCAmelCase :Tuple = [
float(__magic_name__ )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
_lowerCAmelCase :List[str] = decimal_to_binary(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Any = check(__magic_name__ )
print('Prime Implicants are:' )
print(__magic_name__ )
_lowerCAmelCase :List[Any] = prime_implicant_chart(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = selection(__magic_name__ , __magic_name__ )
print('Essential Prime Implicants are:' )
print(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 687 | 1 |
from PIL import Image
def UpperCamelCase_( __magic_name__ : Image , __magic_name__ : float ):
"""simple docstring"""
def brightness(__magic_name__ : int ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(__magic_name__ )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
a = change_brightness(img, 100)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""") | 687 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
a = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
a = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[int]=4 , _UpperCAmelCase: Optional[int]=False ):
_lowerCAmelCase :Any = compute_bleu(
reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) :Tuple = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
} | 687 | 1 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : List[Any] ):
"""simple docstring"""
_lowerCAmelCase :int = checkpoint
_lowerCAmelCase :Union[str, Any] = {}
_lowerCAmelCase :Optional[Any] = vae_state_dict['encoder.conv_in.weight']
_lowerCAmelCase :str = vae_state_dict['encoder.conv_in.bias']
_lowerCAmelCase :Any = vae_state_dict['encoder.conv_out.weight']
_lowerCAmelCase :Optional[Any] = vae_state_dict['encoder.conv_out.bias']
_lowerCAmelCase :int = vae_state_dict['encoder.norm_out.weight']
_lowerCAmelCase :int = vae_state_dict['encoder.norm_out.bias']
_lowerCAmelCase :str = vae_state_dict['decoder.conv_in.weight']
_lowerCAmelCase :Union[str, Any] = vae_state_dict['decoder.conv_in.bias']
_lowerCAmelCase :int = vae_state_dict['decoder.conv_out.weight']
_lowerCAmelCase :Optional[Any] = vae_state_dict['decoder.conv_out.bias']
_lowerCAmelCase :Union[str, Any] = vae_state_dict['decoder.norm_out.weight']
_lowerCAmelCase :Tuple = vae_state_dict['decoder.norm_out.bias']
_lowerCAmelCase :Optional[int] = vae_state_dict['quant_conv.weight']
_lowerCAmelCase :Dict = vae_state_dict['quant_conv.bias']
_lowerCAmelCase :List[Any] = vae_state_dict['post_quant_conv.weight']
_lowerCAmelCase :Any = vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
_lowerCAmelCase :List[Any] = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
_lowerCAmelCase :List[str] = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(__magic_name__ )
}
# Retrieves the keys for the decoder up blocks only
_lowerCAmelCase :Optional[Any] = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
_lowerCAmelCase :int = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(__magic_name__ )
}
for i in range(__magic_name__ ):
_lowerCAmelCase :str = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
_lowerCAmelCase :str = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
_lowerCAmelCase :Optional[int] = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
_lowerCAmelCase :str = renew_vae_resnet_paths(__magic_name__ )
_lowerCAmelCase :Any = {'old': f"""down.{i}.block""", 'new': f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
_lowerCAmelCase :Union[str, Any] = [key for key in vae_state_dict if 'encoder.mid.block' in key]
_lowerCAmelCase :List[Any] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_lowerCAmelCase :List[str] = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
_lowerCAmelCase :List[str] = renew_vae_resnet_paths(__magic_name__ )
_lowerCAmelCase :Optional[int] = {'old': f"""mid.block_{i}""", 'new': f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
_lowerCAmelCase :Dict = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
_lowerCAmelCase :List[Any] = renew_vae_attention_paths(__magic_name__ )
_lowerCAmelCase :List[Any] = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
conv_attn_to_linear(__magic_name__ )
for i in range(__magic_name__ ):
_lowerCAmelCase :str = num_up_blocks - 1 - i
_lowerCAmelCase :Optional[int] = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
_lowerCAmelCase :Any = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
_lowerCAmelCase :Any = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
_lowerCAmelCase :List[Any] = renew_vae_resnet_paths(__magic_name__ )
_lowerCAmelCase :Optional[Any] = {'old': f"""up.{block_id}.block""", 'new': f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
_lowerCAmelCase :Optional[int] = [key for key in vae_state_dict if 'decoder.mid.block' in key]
_lowerCAmelCase :Dict = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_lowerCAmelCase :int = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
_lowerCAmelCase :List[str] = renew_vae_resnet_paths(__magic_name__ )
_lowerCAmelCase :Optional[Any] = {'old': f"""mid.block_{i}""", 'new': f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
_lowerCAmelCase :str = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
_lowerCAmelCase :List[Any] = renew_vae_attention_paths(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
conv_attn_to_linear(__magic_name__ )
return new_checkpoint
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str , ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = requests.get(
' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
_lowerCAmelCase :Dict = io.BytesIO(r.content )
_lowerCAmelCase :Tuple = OmegaConf.load(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = 512
_lowerCAmelCase :Tuple = 'cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith('safetensors' ):
from safetensors import safe_open
_lowerCAmelCase :Optional[Any] = {}
with safe_open(__magic_name__ , framework='pt' , device='cpu' ) as f:
for key in f.keys():
_lowerCAmelCase :Optional[Any] = f.get_tensor(__magic_name__ )
else:
_lowerCAmelCase :Any = torch.load(__magic_name__ , map_location=__magic_name__ )['state_dict']
# Convert the VAE model.
_lowerCAmelCase :Optional[int] = create_vae_diffusers_config(__magic_name__ , image_size=__magic_name__ )
_lowerCAmelCase :List[Any] = custom_convert_ldm_vae_checkpoint(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Optional[Any] = AutoencoderKL(**__magic_name__ )
vae.load_state_dict(__magic_name__ )
vae.save_pretrained(__magic_name__ )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
a = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path) | 687 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 | 1 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
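
# Pytest fixtures that materialize small sample datasets in a variety of file
# formats (plain text, compressed files and archives, CSV, SQLite, Parquet,
# JSON/JSONL) for the test suite. All fixtures are session-scoped, so each
# file is built at most once per test run.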
@pytest.fixture(scope='session')
def dataset():
    n = 10
    features = datasets.Features(
        {
            'tokens': datasets.Sequence(datasets.Value('string')),
            'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'])),
            'answers': datasets.Sequence(
                {
                    'text': datasets.Value('string'),
                    'answer_start': datasets.Value('int32'),
                }
            ),
            'id': datasets.Value('int64'),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            'tokens': [['foo'] * 5] * n,
            'labels': [[1] * 5] * n,
            'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
            'id': list(range(n)),
        },
        features=features,
    )
    return dataset

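# A minimal usage sketch (hypothetical test, not part of this module):
# pytest injects session-scoped fixtures by parameter name, so a test can do
#
#     def test_dataset_shape(dataset):
#         assert len(dataset) == 10
#         assert dataset.features['id'].dtype == 'int64'
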
@pytest.fixture(scope='session')
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp('data') / 'file.arrow')
    dataset.map(cache_file_name=filename)
    return filename

# FILE_CONTENT + files
FILE_CONTENT = """\
Text data.
Second line of data."""

@pytest.fixture(scope='session')
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.txt'
    data = FILE_CONTENT
    with open(filename, 'w') as f:
        f.write(data)
    return filename

@pytest.fixture(scope='session')
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp('data') / 'file.txt.bz2'
    data = bytes(FILE_CONTENT, 'utf-8')
    with bz2.open(path, 'wb') as f:
        f.write(data)
    return path

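# The fixtures below repeat this pattern for other codecs: FILE_CONTENT (or
# an already-built file such as text_file) is written out under a
# codec-specific extension. Codecs that are optional dependencies (lz4,
# py7zr, zstandard) are guarded by datasets.config.*_AVAILABLE flags, so
# those fixtures return None when the codec is not installed.
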
@pytest.fixture(scope='session')
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp('data') / 'file.txt.gz')
    data = bytes(FILE_CONTENT, 'utf-8')
    with gzip.open(path, 'wb') as f:
        f.write(data)
    return path

@pytest.fixture(scope='session')
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp('data') / 'file.txt.lz4'
        data = bytes(FILE_CONTENT, 'utf-8')
        with lz4.frame.open(path, 'wb') as f:
            f.write(data)
        return path

@pytest.fixture(scope='session')
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp('data') / 'file.txt.7z'
        with py7zr.SevenZipFile(path, 'w') as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path

@pytest.fixture(scope='session')
def tar_file(tmp_path_factory, text_file):
    # tarfile is already imported at module level
    path = tmp_path_factory.mktemp('data') / 'file.txt.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path

@pytest.fixture(scope='session')
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp('data') / 'file.txt.xz'
    data = bytes(FILE_CONTENT, 'utf-8')
    with lzma.open(path, 'wb') as f:
        f.write(data)
    return path

@pytest.fixture(scope='session')
def zip_file(tmp_path_factory, text_file):
    # zipfile is already imported at module level
    path = tmp_path_factory.mktemp('data') / 'file.txt.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path

@pytest.fixture(scope='session')
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp('data') / 'file.txt.zst'
        data = bytes(FILE_CONTENT, 'utf-8')
        with zstd.open(path, 'wb') as f:
            f.write(data)
        return path

@pytest.fixture(scope='session')
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.xml'
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, 'w') as f:
        f.write(data)
    return filename

DATA = [
    {'col_1': '0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': '1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': '2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
    {'col_1': '4', 'col_2': 4, 'col_3': 4.0},
    {'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
    'col_1': ['0', '1', '2', '3'],
    'col_2': [0, 1, 2, 3],
    'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {'col_3': 0.0, 'col_1': '0', 'col_2': 0},
    {'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
    {'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
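
# The constants above are the shared toy tables consumed by the fixtures
# below: DATA holds four rows of string/int/float columns, DATA2 two extra
# rows, DATA_DICT_OF_LISTS the same table in column-oriented form, and
# DATA_312 / DATA_STR variants with reordered keys and string-valued col_1.
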
@pytest.fixture(scope='session')
def dataset_dict():
    return DATA_DICT_OF_LISTS

@pytest.fixture(scope='session')
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp('data') / 'dataset.arrow')
    dataset.map(cache_file_name=path)
    return path

@pytest.fixture(scope='session')
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.sqlite')
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)')
        for item in DATA:
            cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)', tuple(item.values()))
        con.commit()
    return path

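# Read-back sketch for the SQLite fixture (a hedged example, assuming
# datasets>=2.4 which added Dataset.from_sql; not used by this module):
#
#     ds = datasets.Dataset.from_sql('dataset', f'sqlite:///{sqlite_path}')
#     assert ds.column_names == ['col_1', 'col_2', 'col_3']
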
@pytest.fixture(scope='session')
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.csv')
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['col_1', 'col_2', 'col_3'])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path

@pytest.fixture(scope='session')
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.csv')
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['col_1', 'col_2', 'col_3'])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path

@pytest.fixture(scope='session')
def bz2_csv_path(tmp_path_factory, csv_path):
    import bz2

    path = tmp_path_factory.mktemp('data') / 'dataset.csv.bz2'
    with open(csv_path, 'rb') as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, 'wb') as f:
        f.write(data)
    return path

@pytest.fixture(scope='session')
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.csv.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path

@pytest.fixture(scope='session')
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.csv.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace('.csv', '.CSV')))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace('.csv', '.CSV')))
    return path

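# The uppercase variant presumably exercises case-insensitive extension
# matching when file formats are inferred from names inside archives.
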
@pytest.fixture(scope='session')
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.csv.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(csv_path, arcname=os.path.join('main_dir', os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join('main_dir', os.path.basename(csv2_path)))
    return path

@pytest.fixture(scope='session')
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.parquet')
    schema = pa.schema(
        {
            'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
        }
    )
    with open(path, 'wb') as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path

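# Read-back sketch (standard pyarrow API, for illustration only):
#
#     table = pq.read_table(parquet_path)
#     assert table.column_names == ['col_1', 'col_2', 'col_3']
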
@pytest.fixture(scope='session')
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.json')
    data = {'data': DATA}
    with open(path, 'w') as f:
        json.dump(data, f)
    return path

@pytest.fixture(scope='session')
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.json')
    data = {'data': DATA_DICT_OF_LISTS}
    with open(path, 'w') as f:
        json.dump(data, f)
    return path

@pytest.fixture(scope='session')
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl')
    with open(path, 'w') as f:
        for item in DATA:
            f.write(json.dumps(item) + '\n')
    return path

@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : List[Any] ):
"""simple docstring"""
_lowerCAmelCase :Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__magic_name__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__magic_name__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__magic_name__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__magic_name__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : Tuple ):
"""simple docstring"""
_lowerCAmelCase :Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__magic_name__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__magic_name__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : Dict , __magic_name__ : List[Any] ):
"""simple docstring"""
import gzip
_lowerCAmelCase :Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__magic_name__ , 'rb' ) as orig_file:
with gzip.open(__magic_name__ , 'wb' ) as zipped_file:
zipped_file.writelines(__magic_name__ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : List[str] , __magic_name__ : Any ):
"""simple docstring"""
import gzip
_lowerCAmelCase :Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__magic_name__ , 'rb' ) as orig_file:
with gzip.open(__magic_name__ , 'wb' ) as zipped_file:
zipped_file.writelines(__magic_name__ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Dict , __magic_name__ : Tuple ):
"""simple docstring"""
_lowerCAmelCase :Tuple = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__magic_name__ , 'w' ) as f:
f.write(__magic_name__ , arcname=os.path.basename(__magic_name__ ) )
f.write(__magic_name__ , arcname=os.path.basename(__magic_name__ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__magic_name__ , 'w' ) as f:
f.write(__magic_name__ , arcname=os.path.join('nested' , os.path.basename(__magic_name__ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__magic_name__ , 'w' ) as f:
f.write(__magic_name__ , arcname=os.path.join('main_dir' , os.path.basename(__magic_name__ ) ) )
f.write(__magic_name__ , arcname=os.path.join('main_dir' , os.path.basename(__magic_name__ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Any ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__magic_name__ , 'w' ) as f:
f.add(__magic_name__ , arcname=os.path.basename(__magic_name__ ) )
f.add(__magic_name__ , arcname=os.path.basename(__magic_name__ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : Dict ):
"""simple docstring"""
_lowerCAmelCase :int = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__magic_name__ , 'w' ) as f:
f.add(__magic_name__ , arcname=os.path.join('nested' , os.path.basename(__magic_name__ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :List[str] = ['0', '1', '2', '3']
_lowerCAmelCase :str = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__magic_name__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Tuple = ['0', '1', '2', '3']
_lowerCAmelCase :Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__magic_name__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : List[Any] ):
"""simple docstring"""
_lowerCAmelCase :str = ['0', '1', '2', '3']
_lowerCAmelCase :Tuple = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__magic_name__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Dict ):
"""simple docstring"""
_lowerCAmelCase :List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__magic_name__ , 'w' ) as f:
f.write(__magic_name__ , arcname=os.path.basename(__magic_name__ ) )
f.write(__magic_name__ , arcname=os.path.basename(__magic_name__ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__magic_name__ , 'w' ) as f:
f.write(__magic_name__ , arcname=os.path.join('main_dir' , os.path.basename(__magic_name__ ) ) )
f.write(__magic_name__ , arcname=os.path.join('main_dir' , os.path.basename(__magic_name__ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : int , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__magic_name__ , 'w' ) as f:
f.write(__magic_name__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(__magic_name__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : List[Any] ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
_lowerCAmelCase :List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__magic_name__ , 'w' , encoding='utf-8' ) as f:
f.write(__magic_name__ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( ):
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( ):
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : List[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__magic_name__ , 'w' ) as f:
f.write(__magic_name__ , arcname=os.path.basename(__magic_name__ ) )
f.write(__magic_name__ , arcname=os.path.basename(__magic_name__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir | 687 |
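# --- Added illustration (hedged sketch, not one of the original dataset rows):
# the fixtures above all follow pytest's session-scoped `tmp_path_factory`
# pattern shown here; `sample_txt` and the test name are made-up examples.
import pytest

@pytest.fixture(scope='session')
def sample_txt(tmp_path_factory):
    # write a tiny file once per test session and hand back its path
    path = tmp_path_factory.mktemp('data') / 'sample.txt'
    path.write_text('hello\n')
    return str(path)

def test_sample_txt(sample_txt):
    with open(sample_txt) as f:
        assert f.read() == 'hello\n'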
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self: str , _UpperCAmelCase: str , _UpperCAmelCase: Optional[int]=7 , _UpperCAmelCase: Union[str, Any]=3 , _UpperCAmelCase: int=18 , _UpperCAmelCase: List[Any]=30 , _UpperCAmelCase: List[Any]=400 , _UpperCAmelCase: Optional[Any]=True , _UpperCAmelCase: Any=None , _UpperCAmelCase: Any=True , _UpperCAmelCase: int=None , _UpperCAmelCase: Union[str, Any]=True , ):
_lowerCAmelCase :Tuple = size if size is not None else {'shortest_edge': 20}
_lowerCAmelCase :str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_lowerCAmelCase :str = parent
_lowerCAmelCase :List[Any] = batch_size
_lowerCAmelCase :Optional[Any] = num_channels
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :int = min_resolution
_lowerCAmelCase :List[str] = max_resolution
_lowerCAmelCase :List[str] = do_resize
_lowerCAmelCase :Optional[int] = size
_lowerCAmelCase :str = do_center_crop
_lowerCAmelCase :int = crop_size
_lowerCAmelCase :Optional[int] = do_flip_channel_order
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Any = MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self: str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_flip_channel_order' ) )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_lowerCAmelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self: int ):
# Initialize image_processing
_lowerCAmelCase :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_lowerCAmelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :str = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
# Initialize image_processing
_lowerCAmelCase :int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :List[str] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
# Initialize image_processing
_lowerCAmelCase :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :int = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , ) | 687 | 1 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
a = logging.get_logger(__name__)
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: List[str] , *_UpperCAmelCase: List[Any] , **_UpperCAmelCase: List[str] ):
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) | 687 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
lowerCamelCase : Optional[datasets.Features] = None
class UpperCAmelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCamelCase : Any = PandasConfig
def SCREAMING_SNAKE_CASE__ ( self: int ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: List[str] ):
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowerCAmelCase :Dict = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_UpperCAmelCase , (str, list, tuple) ):
_lowerCAmelCase :Any = data_files
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :List[Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
_lowerCAmelCase :Any = []
for split_name, files in data_files.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :str = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :Union[str, Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={'files': files} ) )
return splits
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: pa.Table ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase :str = table_cast(_UpperCAmelCase , self.config.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: Dict ):
for i, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase ) ):
with open(_UpperCAmelCase , 'rb' ) as f:
_lowerCAmelCase :Optional[Any] = pa.Table.from_pandas(pd.read_pickle(_UpperCAmelCase ) )
yield i, self._cast_table(_UpperCAmelCase ) | 687 | 1 |
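# --- Added illustration (hedged sketch): the builder above backs the packaged
# 'pandas' loader in `datasets`, which reads pickled DataFrames; the file name
# below is made up.
import pandas as pd
from datasets import load_dataset

pd.DataFrame({'col_1': ['a', 'b'], 'col_2': [1, 2]}).to_pickle('frames.pkl')
ds = load_dataset('pandas', data_files='frames.pkl')
print(ds['train'][0])  # -> {'col_1': 'a', 'col_2': 1}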
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_lowerCAmelCase :int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_lowerCAmelCase :List[str] = 'xvjiarui/stable-diffusion-2-inpainting'
_lowerCAmelCase , _lowerCAmelCase :Tuple = FlaxStableDiffusionInpaintPipeline.from_pretrained(_UpperCAmelCase , safety_checker=_UpperCAmelCase )
_lowerCAmelCase :List[str] = 'Face of a yellow cat, high resolution, sitting on a park bench'
_lowerCAmelCase :Any = jax.random.PRNGKey(0 )
_lowerCAmelCase :Any = 50
_lowerCAmelCase :Optional[int] = jax.device_count()
_lowerCAmelCase :List[Any] = num_samples * [prompt]
_lowerCAmelCase :Dict = num_samples * [init_image]
_lowerCAmelCase :Dict = num_samples * [mask_image]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :List[Any] = pipeline.prepare_inputs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# shard inputs and rng
_lowerCAmelCase :Any = replicate(_UpperCAmelCase )
_lowerCAmelCase :Tuple = jax.random.split(_UpperCAmelCase , jax.device_count() )
_lowerCAmelCase :Union[str, Any] = shard(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = shard(_UpperCAmelCase )
_lowerCAmelCase :List[str] = shard(_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = pipeline(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase )
_lowerCAmelCase :Dict = output.images.reshape(_UpperCAmelCase , 512 , 512 , 3 )
_lowerCAmelCase :Optional[Any] = images[0, 253:256, 253:256, -1]
_lowerCAmelCase :str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_lowerCAmelCase :Optional[int] = jnp.array(
[0.3_6_1_1_3_0_7, 0.3_7_6_4_9_7_3_6, 0.3_7_5_7_4_0_8, 0.3_8_2_1_3_9_5_3, 0.3_9_2_9_5_1_6_7, 0.3_8_4_1_6_3_1, 0.4_1_5_5_4_9_7_8, 0.4_1_3_7_4_7_5, 0.4_2_1_7_0_8_4] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 | 687 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
a = """"""
a = """"""
a = """"""
a = 1 # (0 is vertical, 1 is horizontal)
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = get_dataset(__magic_name__ , __magic_name__ )
print('Processing...' )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = update_image_and_anno(__magic_name__ , __magic_name__ , __magic_name__ )
for index, image in enumerate(__magic_name__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase :Optional[Any] = random_chars(32 )
_lowerCAmelCase :str = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
_lowerCAmelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cva.imwrite(f"""{file_root}.jpg""" , __magic_name__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(__magic_name__ )} with {file_name}""" )
_lowerCAmelCase :str = []
for anno in new_annos[index]:
_lowerCAmelCase :List[str] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(__magic_name__ )
        with open(f"""{file_root}.txt""" , 'w' ) as outfile:
            outfile.write('\n'.join(annos_list ) )
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :int = []
_lowerCAmelCase :Union[str, Any] = []
for label_file in glob.glob(os.path.join(__magic_name__ , '*.txt' ) ):
_lowerCAmelCase :Optional[int] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(__magic_name__ ) as in_file:
_lowerCAmelCase :Union[str, Any] = in_file.readlines()
_lowerCAmelCase :List[Any] = os.path.join(__magic_name__ , f"""{label_name}.jpg""" )
_lowerCAmelCase :Tuple = []
for obj_list in obj_lists:
_lowerCAmelCase :Union[str, Any] = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__magic_name__ )
labels.append(__magic_name__ )
return img_paths, labels
def UpperCamelCase_( __magic_name__ : list , __magic_name__ : list , __magic_name__ : int = 1 ):
"""simple docstring"""
_lowerCAmelCase :str = []
_lowerCAmelCase :Any = []
_lowerCAmelCase :Optional[Any] = []
for idx in range(len(__magic_name__ ) ):
_lowerCAmelCase :Optional[int] = []
_lowerCAmelCase :Optional[Any] = img_list[idx]
path_list.append(__magic_name__ )
_lowerCAmelCase :List[str] = anno_list[idx]
_lowerCAmelCase :Optional[Any] = cva.imread(__magic_name__ )
if flip_type == 1:
_lowerCAmelCase :List[Any] = cva.flip(__magic_name__ , __magic_name__ )
for bbox in img_annos:
_lowerCAmelCase :List[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_lowerCAmelCase :List[str] = cva.flip(__magic_name__ , __magic_name__ )
for bbox in img_annos:
_lowerCAmelCase :List[str] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__magic_name__ )
new_imgs_list.append(__magic_name__ )
return new_imgs_list, new_annos_lists, path_list
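# Worked example (added for illustration): with flip_type == 1 (horizontal),
# a YOLO-format box [cls, x_c, y_c, w, h] = [0, 0.25, 0.5, 0.2, 0.3] becomes
# [0, 0.75, 0.5, 0.2, 0.3] -- only the normalized x-center is mirrored; a
# vertical flip (flip_type == 0) mirrors the y-center instead.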
def UpperCamelCase_( __magic_name__ : int = 32 ):
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase :str = ascii_lowercase + digits
return "".join(random.choice(__magic_name__ ) for _ in range(__magic_name__ ) )
if __name__ == "__main__":
main()
print("""DONE ✅""") | 687 | 1 |
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
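# Worked example (added for illustration): perfect(6) is True because the
# proper divisors 1, 2 and 3 sum to 6, while perfect(8) is False since
# 1 + 2 + 4 == 7.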
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
a = int(input("""Enter number: """).strip())
print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''') | 687 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
a = logging.get_logger(__name__)
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = nn.functional.normalize(__magic_name__ )
_lowerCAmelCase :List[str] = nn.functional.normalize(__magic_name__ )
return torch.mm(__magic_name__ , normalized_text_embeds.t() )
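# --- Added illustration (hedged sketch): the helper above is plain cosine
# similarity between row vectors; the 2-D embeddings below are made up.
import torch.nn.functional as F

_img = F.normalize(torch.tensor([[1.0, 0.0], [0.0, 1.0]]))  # two 'image' rows
_txt = F.normalize(torch.tensor([[1.0, 0.0]]))              # one 'concept' row
assert torch.allclose(torch.mm(_img, _txt.t()), torch.tensor([[1.0], [0.0]]))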
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : str = CLIPConfig
lowerCamelCase : Any = ['CLIPEncoderLayer']
def __init__( self: Optional[int] , _UpperCAmelCase: CLIPConfig ):
super().__init__(_UpperCAmelCase )
_lowerCAmelCase :Any = CLIPVisionModel(config.vision_config )
_lowerCAmelCase :Optional[int] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase )
_lowerCAmelCase :int = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :Any = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :str = nn.Parameter(torch.ones(17 ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = nn.Parameter(torch.ones(3 ) , requires_grad=_UpperCAmelCase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Dict ):
_lowerCAmelCase :str = self.vision_model(_UpperCAmelCase )[1] # pooled_output
_lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase :Optional[int] = cosine_distance(_UpperCAmelCase , self.special_care_embeds ).cpu().float().numpy()
_lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds ).cpu().float().numpy()
_lowerCAmelCase :str = []
_lowerCAmelCase :List[Any] = image_embeds.shape[0]
for i in range(_UpperCAmelCase ):
_lowerCAmelCase :Optional[Any] = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase :List[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
_lowerCAmelCase :List[Any] = special_cos_dist[i][concept_idx]
_lowerCAmelCase :Dict = self.special_care_embeds_weights[concept_idx].item()
_lowerCAmelCase :List[Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} )
_lowerCAmelCase :Any = 0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
_lowerCAmelCase :Union[str, Any] = cos_dist[i][concept_idx]
_lowerCAmelCase :str = self.concept_embeds_weights[concept_idx].item()
_lowerCAmelCase :str = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_UpperCAmelCase )
result.append(_UpperCAmelCase )
_lowerCAmelCase :Any = [len(res['bad_concepts'] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: torch.FloatTensor , _UpperCAmelCase: torch.FloatTensor ):
_lowerCAmelCase :Optional[int] = self.vision_model(_UpperCAmelCase )[1] # pooled_output
_lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase )
_lowerCAmelCase :Dict = cosine_distance(_UpperCAmelCase , self.special_care_embeds )
_lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase :Any = 0.0
_lowerCAmelCase :Union[str, Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
_lowerCAmelCase :Tuple = torch.any(special_scores > 0 , dim=1 )
_lowerCAmelCase :List[str] = special_care * 0.0_1
_lowerCAmelCase :Any = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
_lowerCAmelCase :Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
_lowerCAmelCase :List[str] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts | 687 | 1 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ):
"""simple docstring"""
if isinstance(__magic_name__ , torch.Tensor ):
return image
elif isinstance(__magic_name__ , PIL.Image.Image ):
_lowerCAmelCase :Tuple = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCAmelCase :List[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowerCAmelCase :Optional[Any] = np.concatenate(__magic_name__ , axis=0 )
_lowerCAmelCase :Any = np.array(__magic_name__ ).astype(np.floataa ) / 255.0
_lowerCAmelCase :Optional[int] = image.transpose(0 , 3 , 1 , 2 )
_lowerCAmelCase :int = 2.0 * image - 1.0
_lowerCAmelCase :Optional[int] = torch.from_numpy(__magic_name__ )
elif isinstance(image[0] , torch.Tensor ):
_lowerCAmelCase :str = torch.cat(__magic_name__ , dim=0 )
return image
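# Worked example (added for illustration): `preprocess` rescales uint8 pixels
# with 2.0 * (x / 255) - 1.0, so 255 maps to 1.0 and 0 maps to -1.0, and the
# transpose puts the batch into NCHW layout before it is handed to the VAE.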
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : int=0.9995 ):
"""simple docstring"""
if not isinstance(__magic_name__ , np.ndarray ):
_lowerCAmelCase :Tuple = True
_lowerCAmelCase :str = va.device
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :Any = np.sum(va * va / (np.linalg.norm(__magic_name__ ) * np.linalg.norm(__magic_name__ )) )
if np.abs(__magic_name__ ) > DOT_THRESHOLD:
_lowerCAmelCase :Optional[Any] = (1 - t) * va + t * va
else:
_lowerCAmelCase :int = np.arccos(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = np.sin(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = theta_a * t
_lowerCAmelCase :str = np.sin(__magic_name__ )
_lowerCAmelCase :Any = np.sin(theta_a - theta_t ) / sin_theta_a
_lowerCAmelCase :Optional[Any] = sin_theta_t / sin_theta_a
_lowerCAmelCase :List[Any] = sa * va + sa * va
if inputs_are_torch:
_lowerCAmelCase :int = torch.from_numpy(__magic_name__ ).to(__magic_name__ )
return va
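# --- Added illustration (hedged sketch): the spherical-interpolation formula
# used above, applied to two made-up 2-D unit vectors; unlike plain linear
# interpolation, the midpoint stays on the unit circle.
_va = np.array([1.0, 0.0])
_vb = np.array([0.0, 1.0])
_theta = np.arccos(np.clip(np.dot(_va, _vb), -1.0, 1.0))  # pi / 2 here
_mid = (np.sin(0.5 * _theta) * _va + np.sin(0.5 * _theta) * _vb) / np.sin(_theta)
assert np.isclose(np.linalg.norm(_mid), 1.0)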
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Any = F.normalize(__magic_name__ , dim=-1 )
_lowerCAmelCase :str = F.normalize(__magic_name__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
for param in model.parameters():
_lowerCAmelCase :List[str] = value
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: Any , _UpperCAmelCase: AutoencoderKL , _UpperCAmelCase: CLIPTextModel , _UpperCAmelCase: CLIPModel , _UpperCAmelCase: CLIPTokenizer , _UpperCAmelCase: UNetaDConditionModel , _UpperCAmelCase: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _UpperCAmelCase: CLIPFeatureExtractor , _UpperCAmelCase: str=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Union[str, Any]=None , ):
super().__init__()
self.register_modules(
vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , clip_model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , coca_model=_UpperCAmelCase , coca_tokenizer=_UpperCAmelCase , coca_transform=_UpperCAmelCase , )
_lowerCAmelCase :int = (
feature_extractor.size
if isinstance(feature_extractor.size , _UpperCAmelCase )
else feature_extractor.size['shortest_edge']
)
_lowerCAmelCase :Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _UpperCAmelCase )
set_requires_grad(self.clip_model , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase :Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
self.enable_attention_slicing(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Dict ):
# get the original timestep using init_timestep
_lowerCAmelCase :Optional[Any] = min(int(num_inference_steps * strength ) , _UpperCAmelCase )
_lowerCAmelCase :List[str] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase :Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
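    # Worked example (added for illustration): with num_inference_steps=50 and
    # strength=0.6, init_timestep = min(int(50 * 0.6), 50) = 30 and
    # t_start = max(50 - 30, 0) = 20, so only the last 30 scheduler timesteps
    # are run for the image-to-image denoise.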
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Union[str, Any]=None ):
if not isinstance(_UpperCAmelCase , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(_UpperCAmelCase )}""" )
_lowerCAmelCase :Union[str, Any] = image.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :List[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCAmelCase )
]
_lowerCAmelCase :List[str] = torch.cat(_UpperCAmelCase , dim=0 )
else:
_lowerCAmelCase :List[str] = self.vae.encode(_UpperCAmelCase ).latent_dist.sample(_UpperCAmelCase )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :List[Any] = 0.1_8_2_1_5 * init_latents
_lowerCAmelCase :List[Any] = init_latents.repeat_interleave(_UpperCAmelCase , dim=0 )
_lowerCAmelCase :Dict = randn_tensor(init_latents.shape , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
# get latents
_lowerCAmelCase :Dict = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :List[str] = init_latents
return latents
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, Any] ):
_lowerCAmelCase :Optional[int] = self.coca_transform(_UpperCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCAmelCase :Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_lowerCAmelCase :int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] ):
_lowerCAmelCase :Optional[int] = self.feature_extractor.preprocess(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
_lowerCAmelCase :List[str] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Dict = image_embeddings_clip.repeat_interleave(_UpperCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , _UpperCAmelCase: Dict , _UpperCAmelCase: str , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , ):
_lowerCAmelCase :Dict = latents.detach().requires_grad_()
_lowerCAmelCase :Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowerCAmelCase :int = self.scheduler.alphas_cumprod[timestep]
_lowerCAmelCase :Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase :str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCAmelCase :Optional[Any] = torch.sqrt(_UpperCAmelCase )
_lowerCAmelCase :List[str] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Dict = self.scheduler.sigmas[index]
_lowerCAmelCase :Optional[Any] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :Tuple = 1 / 0.1_8_2_1_5 * sample
_lowerCAmelCase :Optional[Any] = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Tuple = transforms.Resize(self.feature_extractor_size )(_UpperCAmelCase )
_lowerCAmelCase :Tuple = self.normalize(_UpperCAmelCase ).to(latents.dtype )
_lowerCAmelCase :List[Any] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Tuple = spherical_dist_loss(_UpperCAmelCase , _UpperCAmelCase ).mean() * clip_guidance_scale
_lowerCAmelCase :str = -torch.autograd.grad(_UpperCAmelCase , _UpperCAmelCase )[0]
if isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Union[str, Any] = latents.detach() + grads * (sigma**2)
_lowerCAmelCase :Dict = noise_pred_original
else:
_lowerCAmelCase :Optional[int] = noise_pred_original - torch.sqrt(_UpperCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self: Optional[int] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: float = 0.6 , _UpperCAmelCase: Optional[int] = 50 , _UpperCAmelCase: Optional[float] = 7.5 , _UpperCAmelCase: Optional[int] = 1 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Optional[float] = 100 , _UpperCAmelCase: Optional[torch.Generator] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , _UpperCAmelCase: float = 0.8 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(_UpperCAmelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(_UpperCAmelCase , torch.Generator ) and batch_size > 1:
_lowerCAmelCase :int = [generator] + [None] * (batch_size - 1)
_lowerCAmelCase :List[Any] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowerCAmelCase :Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowerCAmelCase :List[str] = ', '.join(_UpperCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :List[Any] = self.get_image_description(_UpperCAmelCase )
if style_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :Any = self.get_image_description(_UpperCAmelCase )
# get prompt text embeddings for content and style
_lowerCAmelCase :Any = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :int = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :List[str] = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase :str = text_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# set timesteps
_lowerCAmelCase :Any = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowerCAmelCase :Dict = {}
if accepts_offset:
_lowerCAmelCase :Optional[int] = 1
self.scheduler.set_timesteps(_UpperCAmelCase , **_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , self.device )
_lowerCAmelCase :int = timesteps[:1].repeat(_UpperCAmelCase )
# Preprocess image
_lowerCAmelCase :Dict = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :int = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :Any = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :str = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if clip_guidance_scale > 0:
_lowerCAmelCase :Optional[Any] = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Any = slerp(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase :int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase :Optional[int] = content_text_input.input_ids.shape[-1]
_lowerCAmelCase :Union[str, Any] = self.tokenizer([''] , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='pt' )
_lowerCAmelCase :Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCAmelCase :Optional[int] = uncond_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase :int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase :Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase :Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCAmelCase :Any = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to(
self.device )
else:
_lowerCAmelCase :List[Any] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCAmelCase :int = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase :Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase :Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase :Any = {}
if accepts_eta:
_lowerCAmelCase :Any = eta
# check if the scheduler accepts generator
_lowerCAmelCase :List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowerCAmelCase :List[Any] = generator
with self.progress_bar(total=_UpperCAmelCase ):
for i, t in enumerate(_UpperCAmelCase ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase :Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase :Tuple = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase :List[str] = noise_pred.chunk(2 )
_lowerCAmelCase :Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
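                    # Worked example (added for illustration): with guidance_scale = 7.5
                    # this computes u + 7.5 * (t - u), amplifying the text-conditioned
                    # direction relative to the unconditional prediction.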
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCAmelCase :List[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.cond_fn(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase :str = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :str = 1 / 0.1_8_2_1_5 * latents
_lowerCAmelCase :Any = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[str] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase :List[Any] = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase ) | 687 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a = 6_3_7_8_1_3_7.0
a = 6_3_5_6_7_5_2.3_1_4_2_4_5
a = 6_378_137
def UpperCamelCase_( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , __magic_name__ : float ):
"""simple docstring"""
_lowerCAmelCase :List[Any] = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
_lowerCAmelCase :Union[str, Any] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) )
_lowerCAmelCase :List[str] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
_lowerCAmelCase :int = haversine_distance(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
_lowerCAmelCase :str = (b_lata + b_lata) / 2
_lowerCAmelCase :Tuple = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
_lowerCAmelCase :str = (sin(__magic_name__ ) ** 2) * (cos(__magic_name__ ) ** 2)
_lowerCAmelCase :Optional[int] = cos(sigma / 2 ) ** 2
    _lowerCAmelCase :List[Any] = (sigma - sin(__magic_name__ )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
_lowerCAmelCase :Dict = (cos(__magic_name__ ) ** 2) * (sin(__magic_name__ ) ** 2)
_lowerCAmelCase :str = sin(sigma / 2 ) ** 2
_lowerCAmelCase :Union[str, Any] = (sigma + sin(__magic_name__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase :List[Any] = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :List[str] = TFAutoModel.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Tuple = AutoModel.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase :List[str] = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :List[Any] = TFAutoModelForPreTraining.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = AutoModelForPreTraining.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :Dict = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Tuple = TFAutoModelForCausalLM.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase :Optional[int] = TFAutoModelForCausalLM.from_pretrained(
_UpperCAmelCase , output_loading_info=_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Any = AutoModelForCausalLM.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase :List[str] = AutoModelForCausalLM.from_pretrained(
_UpperCAmelCase , output_loading_info=_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :Union[str, Any] = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :str = AutoModelWithLMHead.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :Union[str, Any] = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = TFAutoModelForMaskedLM.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase :int = TFAutoModelForMaskedLM.from_pretrained(
_UpperCAmelCase , output_loading_info=_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = AutoModelForMaskedLM.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
_UpperCAmelCase , output_loading_info=_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :List[str] = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase :Any = TFAutoModelForSeqaSeqLM.from_pretrained(
_UpperCAmelCase , output_loading_info=_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(
_UpperCAmelCase , output_loading_info=_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase :Dict = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[int] = TFAutoModelForSequenceClassification.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = AutoModelForSequenceClassification.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self: Any ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase :Optional[int] = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :List[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = AutoModelForQuestionAnswering.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Optional[int] = TFAutoModelWithLMHead.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=_UpperCAmelCase ) , 1_4410 )
_lowerCAmelCase :Union[str, Any] = AutoModelWithLMHead.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=_UpperCAmelCase ) , 1_4410 )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :List[str] = TFAutoModelWithLMHead.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=_UpperCAmelCase ) , 1_4410 )
_lowerCAmelCase :Tuple = AutoModelWithLMHead.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=_UpperCAmelCase ) , 1_4410 ) | 687 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Dict = 'encoder-decoder'
lowerCamelCase : Optional[Any] = True
    def __init__( self: str , **kwargs: int ):
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('encoder' )
        encoder_model_type = encoder_config.pop('model_type' )
        decoder_config = kwargs.pop('decoder' )
        decoder_model_type = decoder_config.pop('model_type' )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
    def SCREAMING_SNAKE_CASE__ ( cls: Tuple , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs: str ):
        logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
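    # Illustrative usage (the config classes named here are assumptions, not
    # imported in this file):
    #   enc, dec = BertConfig(), GPT2Config()
    #   config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
    # The returned config stores both sub-configs as dicts, and the decoder
    # side comes back flagged with is_decoder=True and add_cross_attention=True.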
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
        output = copy.deepcopy(self.__dict__ )
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
return output | 687 | 1 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
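# Background for the merge logic below (shapes are illustrative, not taken from
# a real checkpoint): row-parallel Linear layers shard their weight along the
# input dimension, so TP shards of shape (H, F/2) are concatenated on dim=1;
# column-parallel weights are concatenated on dim=0; biases and LayerNorm
# parameters are replicated across ranks and are averaged instead.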
def layer_name_mapping( key: str , file: str ):
    """simple docstring"""
    # Handle first and last layers
    layer_rename_map = {
        'word_embeddings.weight': 'word_embeddings.weight',
        'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
        'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
        'weight': 'ln_f.weight',
        'bias': 'ln_f.bias',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r'.*layer_(\d*).*' , file )[1] )
    layer_number -= 3
    return f"""h.{layer_number}.""" + key
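# Worked example (the file name is hypothetical): for key
# "mlp.dense_4h_to_h.bias" read from "layer_05-model_00-model_states.pt",
# the regex extracts 5, the offset for the 3 non-block prefix layers gives
# block 2, and the mapped name is "h.2.mlp.dense_4h_to_h.bias".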
def get_dtype_size( dtype: torch.dtype ):
    """simple docstring"""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r'[^\d](\d+)$' , str(dtype ) )
    if bit_search is None:
        raise ValueError(f"""`dtype` is not a valid dtype: {dtype}.""" )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
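# For example, get_dtype_size(torch.float16) matches the trailing "16" in
# "torch.float16" and returns 16 // 8 = 2 bytes; torch.bool is special-cased
# above to 1/8 byte.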
def convert_bloom_checkpoint_to_pytorch( bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ):
    """simple docstring"""
    # Construct the configuration
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file )
    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s , file_names ) )
        index_dict = {'weight_map': {}, 'metadata': {}}
        total_size = 0
        missing_keys = None
        config = BloomConfig()
        for j, file in enumerate(file_names ):
            print('Processing file: {}'.format(file ) )
            tensors = None
            for i in range(pretraining_tp ):
                # load all TP files
                f_name = file.replace('model_00' , f"""model_0{i}""" )
                temp = torch.load(os.path.join(bloom_checkpoint_path , f_name ) , map_location='cpu' )
                # Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key , file )] = temp.pop(key )
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors , os.path.join(
                    pytorch_dump_folder_path , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) ) , ) , )
            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype )
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = 'pytorch_model_{}-of-{}.bin'.format(
                        str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) )
        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
        index_dict['metadata']['total_size'] = total_size
        with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
        with open(os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
            json_config = json.dumps(index_dict , indent=2 , sort_keys=True ) + '\n'
            f.write(json_config )
    else:
        model = BloomModel(config )
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s , file_names ) )
        missing_keys = None
        for i, file in enumerate(file_names ):
            tensors = None
            for i in range(pretraining_tp ):
                # load all TP files
                f_name = file.replace('model_00' , f"""model_0{i}""" )
                temp = torch.load(os.path.join(bloom_checkpoint_path , f_name ) , map_location='cpu' )
                # Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key , file )] = temp.pop(key )
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
            other_keys = model.load_state_dict(tensors , strict=False )
            assert not other_keys.unexpected_keys, f"""The keys {other_keys.unexpected_keys} are unexpected"""
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys )
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys ) )
        assert not missing_keys, f"""The keys {missing_keys} are missing"""
        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path , exist_ok=True )
        pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
        print(f"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(f"""Save configuration file to {pytorch_config_dump_path}""" )
        with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
) | 687 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , hidden_sizes=[32, 64, 128] , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.0_2 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2"] , out_indices=[1, 2] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self: int ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def SCREAMING_SNAKE_CASE__ ( self: List[Any] , config , pixel_values , labels ):
        model = FocalNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
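    # With the tester defaults above (image_size=32, patch_size=2,
    # depths=[1, 2, 1], embed_dim=16), this asserts a shape of (13, 16, 64):
    # 256 patches reduced by 4**2 across the stages to 16 tokens, each with
    # 16 * 2**2 = 64 channels.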
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCAmelCase :Optional[int] = None
_lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase :List[Any] = 1
_lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :int = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size
_lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase :Optional[int] = 1
_lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs
_lowerCAmelCase :List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : Optional[Any] = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Any = False
lowerCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def SCREAMING_SNAKE_CASE__ ( self: str ):
pass
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Tuple = model_class(_UpperCAmelCase )
_lowerCAmelCase :Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase :int = [*signature.parameters.keys()]
_lowerCAmelCase :List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_lowerCAmelCase :List[Any] = outputs.hidden_states
_lowerCAmelCase :str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# FocalNet has a different seq_length
_lowerCAmelCase :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase :List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape
_lowerCAmelCase :Optional[int] = (
reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Dict = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :str = 3
_lowerCAmelCase :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase :int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :List[str] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Union[str, Any] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase :str = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.default_image_processor
_lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase :Dict = model(**_UpperCAmelCase )
# verify the logits
_lowerCAmelCase :str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase : str = FocalNetConfig
lowerCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
        self.model_tester = FocalNetModelTester(self ) | 687 | 1 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert( src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ):
    """simple docstring"""
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
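# Example invocation through fire (the file and paths are placeholders):
#   python convert_model_to_fp16.py pytorch_model.bin --save_path model.fp16.bin
# Omitting --save_path overwrites the source checkpoint in place.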
if __name__ == "__main__":
fire.Fire(convert) | 687 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
api = HfApi()
results = {}
# fmt: off
a = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
a = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
a = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
a = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
a = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
a = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
a = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
a = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
a = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
a = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
a = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
a = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
a = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
a = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
a = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter="""diffusers""")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
        print(F'''Started running {mod.modelId}!!!''')
        if mod.modelId.startswith("""CompVis"""):
            model = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
        else:
            model = UNetaDModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''') | 687 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a = logging.get_logger(__name__)
a = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class UpperCAmelCase_ (snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCamelCase : Dict = 'resnet'
lowerCamelCase : int = ['basic', 'bottleneck']
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
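        # With the default depths [3, 4, 6, 3], stage_names becomes
        # ['stem', 'stage1', 'stage2', 'stage3', 'stage4'], and the helper above
        # aligns out_features/out_indices against those names.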
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Dict = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE__ ( self: str ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return 1e-3 | 687 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: int ):
        self.block_size = 10
    def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def SCREAMING_SNAKE_CASE__ ( self: int ):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _ , summary_lines = process_story(raw_story )
        self.assertEqual(summary_lines , [] )
    def SCREAMING_SNAKE_CASE__ ( self: Any ):
        raw_story = ''
        document_lines , summary_lines = process_story(raw_story )
        self.assertEqual(document_lines , [] )
        self.assertEqual(summary_lines , [] )
    def SCREAMING_SNAKE_CASE__ ( self: str ):
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        document_lines , summary_lines = process_story(raw_story )
        expected_document_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_document_lines , document_lines )
        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines , summary_lines )
    def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
        sequence = torch.tensor([1, 2, 3, 4] )
        expected = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(sequence , 0 ).numpy() , expected.numpy() )
    def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 23 ).numpy() , expected.numpy() )
    def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 1 ).numpy() , expected.numpy() )
    def SCREAMING_SNAKE_CASE__ ( self: str ):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        result = compute_token_type_ids(batch , separator )
        np.testing.assert_array_equal(result , expected ) | 687 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = 't5'
lowerCamelCase : Any = ['past_key_values']
lowerCamelCase : List[Any] = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__( self: Any , vocab_size=3_2128 , d_model=512 , d_kv=64 , d_ff=2048 , num_layers=6 , num_decoder_layers=None , num_heads=8 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        ) # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('-' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
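        # e.g. feed_forward_proj="gated-gelu" yields dense_act_fn="gelu_new"
        # with is_gated_act=True, while the default "relu" keeps a single,
        # non-gated feed-forward stack.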
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
return common_inputs
@property
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
return 13 | 687 |
def perfect( number: int ):
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
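# For example, 28 is perfect (1 + 2 + 4 + 7 + 14 == 28), while 27 is not
# (1 + 3 + 9 == 13).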
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
    number = int(input("""Enter number: """).strip())
print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''') | 687 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'
def SCREAMING_SNAKE_CASE__ ( self: str , **_UpperCAmelCase: Optional[Any] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            'semantic_prompt': np.ones(seq_len ),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
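        # The arrays mimic Bark history prompts: a 1-D semantic token sequence
        # plus 2-D coarse/fine codebook grids of shape (num_codebooks, seq_len),
        # here (2, 35) and (8, 35).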
# test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        file_path = os.path.join(self.tmpdirname , 'file.npz' )
        np.savez(file_path , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=file_path )
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding='max_length' , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() ) | 687 |
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
"""simple docstring"""
    def __init__( self: List[Any] , degree: int , coefficients: MutableSequence[float] ):
        if len(coefficients ) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.' )
        self.coefficients: list[float] = list(coefficients )
        self.degree = degree
    def __add__( self: str , polynomial_a: Polynomial ):
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , coefficients )
    def __sub__( self: str , polynomial_a: Polynomial ):
        return self + polynomial_a * Polynomial(0 , [-1] )
    def __neg__( self: Union[str, Any] ):
        return Polynomial(self.degree , [-c for c in self.coefficients] )
    def __mul__( self: int , polynomial_a: Polynomial ):
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , coefficients )
    def SCREAMING_SNAKE_CASE__ ( self: Tuple , substitution: int | float ):
        result: int | float = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result
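    # Example (illustrative): Polynomial(2, [1, 0, 3]) models 3x^2 + 1, and the
    # evaluation above at substitution=2 accumulates 1*2**0 + 0*2**1 + 3*2**2 = 13.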
    def __str__( self: Union[str, Any] ):
        polynomial = ''
        for i in range(self.degree , -1 , -1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
        return polynomial
    def __repr__( self: Optional[Any] ):
        return self.__str__()
    def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )
    def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , constant: int | float = 0 ):
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )
    def __eq__( self: List[Any] , polynomial_a: object ):
        if not isinstance(polynomial_a , Polynomial ):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1 ):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__( self: Optional[Any] , polynomial_a: object ):
        return not self.__eq__(polynomial_a ) | 687 | 1 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
    bleu_data = json.load(f)
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Tuple ):
return FSMTTokenizer.from_pretrained(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[int] ):
        model = FSMTForConditionalGeneration.from_pretrained(_UpperCAmelCase ).to(torch_device )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 2_6.0],
['ru-en', 2_2.0],
['en-de', 2_2.0],
['de-en', 2_9.0],
] )
@slow
    def SCREAMING_SNAKE_CASE__ ( self: List[str] , pair , min_bleu_score ):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']
        batch = tokenizer(src_sentences , return_tensors='pt' , truncation=True , padding='longest' ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores['bleu'] , min_bleu_score ) | 687 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
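# The mapping above feeds the _LazyModule installed at the bottom of this file:
# each submodule is imported only when one of its listed names is first
# accessed, so importing just the config names never pulls in torch or flax.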
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 | 1 |
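# The _LazyModule indirection above defers the heavy torch/flax imports until an
# attribute is first accessed. A minimal sketch of the same idea using module-level
# __getattr__ (PEP 562); the mapped modules here are stand-ins, not transformers code:
import importlib

_lazy_structure = {"math": ["sqrt"], "json": ["dumps"]}
_attr_to_module = {attr: mod for mod, attrs in _lazy_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        # Import the backing module only now, on first attribute access
        return getattr(importlib.import_module(_attr_to_module[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")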
from __future__ import annotations
from collections import namedtuple
def UpperCamelCase_( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float ):
"""simple docstring"""
_lowerCAmelCase :Any = namedtuple('result' , 'name value' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('Only one argument must be 0' )
elif power < 0:
raise ValueError(
'Power cannot be negative in any electrical/electronics system' )
elif voltage == 0:
return result('voltage' , power / current )
elif current == 0:
return result('current' , power / voltage )
elif power == 0:
return result('power' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 |
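# The three parameters above share one mangled name in this dump; a standalone
# restatement with the conventional signature (assumed, not taken from the source):
from collections import namedtuple

def electric_power_sketch(voltage: float, current: float, power: float):
    Result = namedtuple("Result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Exactly one argument must be 0")
    if voltage == 0:
        return Result("voltage", power / current)        # V = P / I
    if current == 0:
        return Result("current", power / voltage)        # I = P / V
    return Result("power", round(voltage * current, 2))  # P = V * I

print(electric_power_sketch(0, 2, 10))  # Result(name='voltage', value=5.0)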
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : float | Decimal , __magic_name__ : float = 10**-10 ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = a
while True:
_lowerCAmelCase :str = Decimal(__magic_name__ ) - (
Decimal(eval(__magic_name__ ) ) / Decimal(eval(str(diff(__magic_name__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(__magic_name__ ) ) < precision: # noqa: S307
return float(__magic_name__ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
# Find Square Root of 5
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''') | 687 | 1 |
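# The version above drives Newton-Raphson through eval()/sympy on a string expression;
# a minimal callable-based sketch of the same iteration (names hypothetical):
import math
from collections.abc import Callable

def newton_raphson_sketch(f: Callable[[float], float], df: Callable[[float], float],
                          x0: float, precision: float = 1e-10, max_iter: int = 100) -> float:
    x = x0
    for _ in range(max_iter):
        if abs(f(x)) < precision:
            return x
        x -= f(x) / df(x)  # x_{n+1} = x_n - f(x_n) / f'(x_n)
    raise RuntimeError("Newton-Raphson did not converge")

print(newton_raphson_sketch(math.sin, math.cos, 2))  # ~3.141592653589793 (pi)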
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
a = parser.parse_args()
a = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
a = CLIPImageProcessor()
a = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
a = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path) | 687 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
a = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
def UpperCamelCase_( __magic_name__ : Dict ):
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any]=False ):
"""simple docstring"""
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
_lowerCAmelCase :Union[str, Any] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
_lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
_lowerCAmelCase :Optional[int] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : List[str]=None ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Tuple = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.norm.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.norm.bias"""]
_lowerCAmelCase :Dict = weight_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :str = bias_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[str] = weight_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Optional[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Tuple = weight_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[Any] = bias_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :int = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = torch.load(__magic_name__ , map_location='cpu' )
_lowerCAmelCase :List[Any] = {}
_lowerCAmelCase :List[str] = checkpoint['time_embed.0.weight']
_lowerCAmelCase :Tuple = checkpoint['time_embed.0.bias']
_lowerCAmelCase :Dict = checkpoint['time_embed.2.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_lowerCAmelCase :Union[str, Any] = checkpoint['label_emb.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.bias']
_lowerCAmelCase :List[Any] = unet_config['down_block_types']
_lowerCAmelCase :Any = unet_config['layers_per_block']
_lowerCAmelCase :List[Any] = unet_config['attention_head_dim']
_lowerCAmelCase :Tuple = unet_config['block_out_channels']
_lowerCAmelCase :List[str] = 1
_lowerCAmelCase :Optional[int] = channels_list[0]
for i, layer_type in enumerate(__magic_name__ ):
_lowerCAmelCase :Tuple = channels_list[i]
_lowerCAmelCase :Optional[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :int = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[Any] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :int = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase :List[Any] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :List[str] = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Optional[int] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :List[str] = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :Optional[int] = f"""down_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :str = f"""input_blocks.{current_layer}.1"""
_lowerCAmelCase :Optional[Any] = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Union[str, Any] = f"""down_blocks.{i}.downsamplers.0"""
_lowerCAmelCase :Tuple = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
_lowerCAmelCase :Dict = current_channels
# hardcoded the mid-block for now
_lowerCAmelCase :int = 'mid_block.resnets.0'
_lowerCAmelCase :Optional[Any] = 'middle_block.0'
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Optional[int] = 'mid_block.attentions.0'
_lowerCAmelCase :Optional[int] = 'middle_block.1'
_lowerCAmelCase :List[Any] = convert_attention(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Union[str, Any] = 'mid_block.resnets.1'
_lowerCAmelCase :Optional[int] = 'middle_block.2'
_lowerCAmelCase :int = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = 0
_lowerCAmelCase :str = unet_config['up_block_types']
for i, layer_type in enumerate(__magic_name__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Optional[Any] = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :Any = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Any = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer-1}.1"""
_lowerCAmelCase :Tuple = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Tuple = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[str] = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :str = f"""up_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :List[Any] = f"""output_blocks.{current_layer}.1"""
_lowerCAmelCase :int = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Optional[int] = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :int = f"""output_blocks.{current_layer-1}.2"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :str = checkpoint['out.0.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['out.0.bias']
_lowerCAmelCase :List[Any] = checkpoint['out.2.weight']
_lowerCAmelCase :Dict = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
a = parser.parse_args()
a = strabool(args.class_cond)
a = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
a = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
a = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
a = None
a = con_pt_to_diffuser(args.unet_path, unet_config)
a = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
a = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
a = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
a = CMStochasticIterativeScheduler(**scheduler_config)
a = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path) | 687 | 1 |
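# The conversion above is, at its core, a key-renaming pass over a checkpoint dict.
# A toy sketch of the pattern (the prefixes here are illustrative, not the real layouts):
import torch

def rename_prefix(checkpoint: dict, old_prefix: str, new_prefix: str) -> dict:
    renamed = {}
    for key, tensor in checkpoint.items():
        if key.startswith(old_prefix + "."):
            key = new_prefix + key[len(old_prefix):]
        renamed[key] = tensor
    return renamed

toy_ckpt = {"in_layers.0.weight": torch.zeros(2), "out.bias": torch.zeros(2)}
print(sorted(rename_prefix(toy_ckpt, "in_layers", "resnets.0")))
# ['out.bias', 'resnets.0.0.weight']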
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
a = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
a = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: List[List[List[str]]] , _UpperCAmelCase: List[List[str]] , _UpperCAmelCase: int = 1 , _UpperCAmelCase: int = 4 , ):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=_UpperCAmelCase , hypotheses=_UpperCAmelCase , min_len=_UpperCAmelCase , max_len=_UpperCAmelCase )
} | 687 |
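# A from-scratch sketch of the GLEU idea the docstring describes (single reference,
# n-gram orders 1..4): the score is min(n-gram precision, n-gram recall). The real
# metric above aggregates counts over the whole corpus and supports multiple references.
from collections import Counter

def _ngrams(tokens, n):
    return Counter(tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1))

def sentence_gleu_sketch(hypothesis, reference, min_len=1, max_len=4):
    hyp_counts, ref_counts = Counter(), Counter()
    for n in range(min_len, max_len + 1):
        hyp_counts += _ngrams(hypothesis, n)
        ref_counts += _ngrams(reference, n)
    overlap = sum((hyp_counts & ref_counts).values())  # clipped n-gram matches
    precision = overlap / max(sum(hyp_counts.values()), 1)
    recall = overlap / max(sum(ref_counts.values()), 1)
    return min(precision, recall)

print(sentence_gleu_sketch("the cat sat".split(), "the cat sat".split()))  # 1.0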
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
a = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Optional[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
_lowerCAmelCase :Tuple = self.diffusers_dir
shutil.copy(
os.path.join(_UpperCAmelCase , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :str = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=None ):
_lowerCAmelCase :int = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_lowerCAmelCase :Dict = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_lowerCAmelCase :Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_lowerCAmelCase :List[str] = black.format_str(_UpperCAmelCase , mode=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(_UpperCAmelCase , 'w' , newline='\n' ) as f:
f.write(_UpperCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_UpperCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_UpperCAmelCase )
with open(_UpperCAmelCase , 'r' ) as f:
self.assertTrue(f.read() , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :List[str] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
# Base copy consistency
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , _UpperCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , )
# Copy consistency with a really long name
_lowerCAmelCase :Optional[int] = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , _UpperCAmelCase , _UpperCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , _UpperCAmelCase , overwrite_result=re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , ) | 687 | 1 |
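# The checker exercised above boils down to: normalize both code blocks with black,
# then compare. A toy sketch of that core step (not the actual check_copies internals):
import black

def formats_identically(code_a: str, code_b: str) -> bool:
    mode = black.Mode(line_length=119)
    return black.format_str(code_a, mode=mode) == black.format_str(code_b, mode=mode)

print(formats_identically("x=1", "x = 1"))  # True -- same code up to formatting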
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : int ):
"""simple docstring"""
return 1 if input_a == input_a else 0
def UpperCamelCase_( ):
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1)) | 687 |
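# The gate above is just an equality test; for 0/1 inputs it also equals the negated
# XOR masked to one bit. A standalone cross-check (local redefinition, name assumed):
def xnor(a_in: int, b_in: int) -> int:
    return 1 if a_in == b_in else 0

for a_in in (0, 1):
    for b_in in (0, 1):
        assert xnor(a_in, b_in) == (~(a_in ^ b_in)) & 1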
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase : Optional[str] = field(
        default='./' , metadata={'help': 'Save dir where the model repo is cloned and model updates are saved to.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCamelCase : Optional[int] = field(
default=1_00_00 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate for training.'} )
    lowerCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate scheduler type.'} )
lowerCamelCase : Optional[int] = field(
default=7_50 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase : Optional[int] = field(
default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase : Optional[int] = field(default=5_00_00 , metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCamelCase : Optional[int] = field(
        default=10_24 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes, not training steps.'} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase : Optional[int] = field(default=2_56 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase : Optional[int] = field(
default=2_00 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase : Optional[str] = field(
        default='eval_results.json' , metadata={'help': 'File in which to save the evaluation results.'} )
lowerCamelCase : Optional[str] = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase : Optional[str] = field(
        default='codeparrot-clean' , metadata={'help': 'Folder to save the processed dataset.'} )
lowerCamelCase : Optional[int] = field(
default=10_00_00 , metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[float] = field(
default=10_00 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1_00 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase : Optional[float] = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[int] = field(default=20_00_00 , metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCamelCase : Optional[int] = field(
        default=3_27_68 , metadata={'help': 'Vocabulary size of the new tokenizer.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase : Optional[str] = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
    lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for parallel pretokenization.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} ) | 687 | 1 |
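# Field identifiers are mangled in this dump, but dataclasses like these are normally
# consumed via transformers.HfArgumentParser. A self-contained sketch (field names
# assumed, modeled on the originals):
from dataclasses import dataclass, field
from typing import Optional
from transformers import HfArgumentParser

@dataclass
class TrainConfigSketch:
    model_ckpt: Optional[str] = field(default="codeparrot/codeparrot",
                                      metadata={"help": "Model name or path."})
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size."})

(cfg,) = HfArgumentParser(TrainConfigSketch).parse_args_into_dataclasses(
    ["--train_batch_size", "4"])
print(cfg.model_ckpt, cfg.train_batch_size)  # codeparrot/codeparrot 4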
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = XLMTokenizer
lowerCamelCase : Dict = False
def SCREAMING_SNAKE_CASE__ ( self: str ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase :int = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
_lowerCAmelCase :int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
_lowerCAmelCase :Optional[int] = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
_lowerCAmelCase :Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: int ):
_lowerCAmelCase :Any = 'lower newer'
_lowerCAmelCase :Dict = 'lower newer'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :List[Any] = XLMTokenizer(self.vocab_file , self.merges_file )
_lowerCAmelCase :Tuple = 'lower'
_lowerCAmelCase :List[Any] = ['low', 'er</w>']
_lowerCAmelCase :Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :str = tokens + ['<unk>']
_lowerCAmelCase :Dict = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :Optional[int] = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
_lowerCAmelCase :List[Any] = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1] | 687 |
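# The vocab/merges fixture above encodes the essence of BPE: start from characters and
# greedily apply learned merges in priority order. A toy single-merge sketch:
def apply_merge(symbols: list, pair: tuple) -> list:
    merged, i = [], 0
    while i < len(symbols):
        if i + 1 < len(symbols) and (symbols[i], symbols[i + 1]) == pair:
            merged.append(symbols[i] + symbols[i + 1])
            i += 2
        else:
            merged.append(symbols[i])
            i += 1
    return merged

word = ["l", "o", "w", "e", "r</w>"]
for merge in [("l", "o"), ("lo", "w"), ("e", "r</w>")]:  # the fixture's merge order
    word = apply_merge(word, merge)
print(word)  # ['low', 'er</w>'] -- matching the expected tokens in the test above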
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :List[str] = 'ylacombe/bark-small'
_lowerCAmelCase :int = tempfile.mkdtemp()
_lowerCAmelCase :List[str] = 'en_speaker_1'
_lowerCAmelCase :Union[str, Any] = 'This is a test string'
_lowerCAmelCase :List[Any] = 'speaker_embeddings_path.json'
_lowerCAmelCase :str = 'speaker_embeddings'
def SCREAMING_SNAKE_CASE__ ( self: str , **_UpperCAmelCase: Optional[Any] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :List[Any] = self.get_tokenizer()
_lowerCAmelCase :List[str] = BarkProcessor(tokenizer=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase :Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCAmelCase :Any = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase :List[Any] = 35
_lowerCAmelCase :Optional[int] = 2
_lowerCAmelCase :Dict = 8
_lowerCAmelCase :Dict = {
'semantic_prompt': np.ones(_UpperCAmelCase ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase :int = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase :Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Tuple = self.get_tokenizer()
_lowerCAmelCase :Union[str, Any] = BarkProcessor(tokenizer=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = processor(text=self.input_string )
_lowerCAmelCase :List[str] = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() ) | 687 | 1 |
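# The voice-preset round-trip above leans on np.savez storing each dict entry as a
# named array in one .npz archive. A minimal standalone round-trip:
import os
import tempfile
import numpy as np

preset = {"semantic_prompt": np.ones(3), "coarse_prompt": np.ones((2, 3))}
npz_path = os.path.join(tempfile.mkdtemp(), "preset.npz")
np.savez(npz_path, **preset)                     # each key becomes a named array
loaded = np.load(npz_path)
assert all(np.array_equal(preset[k], loaded[k]) for k in preset)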
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : int = 'bert'
def __init__( self: Optional[Any] , _UpperCAmelCase: Tuple=3_0522 , _UpperCAmelCase: int=768 , _UpperCAmelCase: Union[str, Any]=12 , _UpperCAmelCase: Dict=12 , _UpperCAmelCase: List[Any]=3072 , _UpperCAmelCase: List[Any]="gelu" , _UpperCAmelCase: Union[str, Any]=0.1 , _UpperCAmelCase: Dict=0.1 , _UpperCAmelCase: List[Any]=512 , _UpperCAmelCase: Optional[Any]=2 , _UpperCAmelCase: Optional[int]=0.0_2 , _UpperCAmelCase: Any=1e-1_2 , _UpperCAmelCase: Optional[Any]=0 , _UpperCAmelCase: Union[str, Any]="absolute" , _UpperCAmelCase: Dict=True , _UpperCAmelCase: Optional[Any]=None , **_UpperCAmelCase: Optional[int] , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :List[Any] = vocab_size
_lowerCAmelCase :Tuple = hidden_size
_lowerCAmelCase :Dict = num_hidden_layers
_lowerCAmelCase :Optional[Any] = num_attention_heads
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :int = intermediate_size
_lowerCAmelCase :Tuple = hidden_dropout_prob
_lowerCAmelCase :Tuple = attention_probs_dropout_prob
_lowerCAmelCase :List[Any] = max_position_embeddings
_lowerCAmelCase :Dict = type_vocab_size
_lowerCAmelCase :Any = initializer_range
_lowerCAmelCase :int = layer_norm_eps
_lowerCAmelCase :List[Any] = position_embedding_type
_lowerCAmelCase :int = use_cache
_lowerCAmelCase :Union[str, Any] = classifier_dropout
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
if self.task == "multiple-choice":
_lowerCAmelCase :List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase :Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] ) | 687 | 1 |
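# Typical use of a config class like the one above: instantiate with overrides, then
# read hyperparameters back. In the ONNX mapping, {0: 'batch', 1: 'sequence'} marks
# the input dimensions that may vary at export time.
from transformers import BertConfig

config = BertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=2)
print(config.hidden_size, config.num_hidden_layers)  # 128 2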
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
a = logging.get_logger(__name__)
def UpperCamelCase_( __magic_name__ : Union[tf.Tensor, np.ndarray] ):
"""simple docstring"""
if isinstance(__magic_name__ , np.ndarray ):
return list(tensor.shape )
_lowerCAmelCase :Dict = tf.shape(__magic_name__ )
if tensor.shape == tf.TensorShape(__magic_name__ ):
return dynamic
_lowerCAmelCase :Optional[int] = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(__magic_name__ )]
def UpperCamelCase_( __magic_name__ : tf.Tensor , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1e-9 , axis=__magic_name__ , name=__magic_name__ )
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any]=1e-5 , __magic_name__ : Tuple=-1 ):
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__magic_name__ , __magic_name__ ):
raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
# Get mean and variance on the axis to be normalized
_lowerCAmelCase , _lowerCAmelCase :Any = tf.nn.moments(__magic_name__ , axes=[axis] , keepdims=__magic_name__ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
_lowerCAmelCase :Optional[Any] = [1] * inputs.shape.rank
_lowerCAmelCase :Union[str, Any] = shape_list(__magic_name__ )[axis]
_lowerCAmelCase :Dict = tf.reshape(__magic_name__ , __magic_name__ )
_lowerCAmelCase :str = tf.reshape(__magic_name__ , __magic_name__ )
# Compute layer normalization using the batch_normalization
# function.
_lowerCAmelCase :str = tf.nn.batch_normalization(
__magic_name__ , __magic_name__ , __magic_name__ , offset=__magic_name__ , scale=__magic_name__ , variance_epsilon=__magic_name__ , )
return outputs
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : Tuple=0 , __magic_name__ : Dict=-1 ):
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
_lowerCAmelCase :Dict = tf.shape(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
_lowerCAmelCase :Union[str, Any] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(__magic_name__ , __magic_name__ )
def UpperCamelCase_( __magic_name__ : tf.Tensor ):
"""simple docstring"""
if not isinstance(__magic_name__ , tf.Tensor ):
_lowerCAmelCase :Union[str, Any] = tf.convert_to_tensor(__magic_name__ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
_lowerCAmelCase :Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
_lowerCAmelCase :Dict = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
_lowerCAmelCase :str = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def UpperCamelCase_( __magic_name__ : tf.Tensor , __magic_name__ : int , __magic_name__ : str = "input_ids" ):
"""simple docstring"""
tf.debugging.assert_less(
__magic_name__ , tf.cast(__magic_name__ , dtype=tensor.dtype ) , message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(__magic_name__ )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
_lowerCAmelCase :Optional[Any] = [x for x in data if len(__magic_name__ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'The following attributes cannot be saved to HDF5 file because '
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
_lowerCAmelCase :Tuple = np.asarray(__magic_name__ )
_lowerCAmelCase :Optional[int] = 1
_lowerCAmelCase :Any = np.array_split(__magic_name__ , __magic_name__ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
_lowerCAmelCase :Union[str, Any] = np.array_split(__magic_name__ , __magic_name__ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(__magic_name__ ):
_lowerCAmelCase :int = chunk_data
else:
_lowerCAmelCase :Tuple = data
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : List[str] ):
"""simple docstring"""
if name in group.attrs:
_lowerCAmelCase :Dict = [n.decode('utf8' ) if hasattr(__magic_name__ , 'decode' ) else n for n in group.attrs[name]]
else:
_lowerCAmelCase :List[Any] = []
_lowerCAmelCase :Tuple = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('utf8' ) if hasattr(__magic_name__ , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
chunk_id += 1
return data
def UpperCamelCase_( __magic_name__ : Dict ):
"""simple docstring"""
def _expand_single_ad_tensor(__magic_name__ : str ):
if isinstance(__magic_name__ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(__magic_name__ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , __magic_name__ ) | 687 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ):
"""simple docstring"""
if isinstance(__magic_name__ , torch.Tensor ):
return image
elif isinstance(__magic_name__ , PIL.Image.Image ):
_lowerCAmelCase :Tuple = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCAmelCase :List[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowerCAmelCase :Optional[Any] = np.concatenate(__magic_name__ , axis=0 )
_lowerCAmelCase :Any = np.array(__magic_name__ ).astype(np.floataa ) / 255.0
_lowerCAmelCase :Optional[int] = image.transpose(0 , 3 , 1 , 2 )
_lowerCAmelCase :int = 2.0 * image - 1.0
_lowerCAmelCase :Optional[int] = torch.from_numpy(__magic_name__ )
elif isinstance(image[0] , torch.Tensor ):
_lowerCAmelCase :str = torch.cat(__magic_name__ , dim=0 )
return image
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : int=0.9995 ):
"""simple docstring"""
if not isinstance(__magic_name__ , np.ndarray ):
_lowerCAmelCase :Tuple = True
_lowerCAmelCase :str = va.device
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :Any = np.sum(va * va / (np.linalg.norm(__magic_name__ ) * np.linalg.norm(__magic_name__ )) )
if np.abs(__magic_name__ ) > DOT_THRESHOLD:
_lowerCAmelCase :Optional[Any] = (1 - t) * va + t * va
else:
_lowerCAmelCase :int = np.arccos(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = np.sin(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = theta_a * t
_lowerCAmelCase :str = np.sin(__magic_name__ )
_lowerCAmelCase :Any = np.sin(theta_a - theta_t ) / sin_theta_a
_lowerCAmelCase :Optional[Any] = sin_theta_t / sin_theta_a
_lowerCAmelCase :List[Any] = sa * va + sa * va
if inputs_are_torch:
_lowerCAmelCase :int = torch.from_numpy(__magic_name__ ).to(__magic_name__ )
return va
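# Usage sketch (added): `slerp` (the name this function is called by further down)
# interpolates along the great-circle arc between two tensors, falling back to
# plain linear interpolation when the inputs are nearly parallel (dot product
# above the 0.9995 threshold). It accepts torch tensors or NumPy arrays:
#
#   va, vb = torch.randn(4, 64), torch.randn(4, 64)
#   mid = slerp(0.5, va, vb)  # halfway along the arc, same shape as the inputs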
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Any = F.normalize(__magic_name__ , dim=-1 )
_lowerCAmelCase :str = F.normalize(__magic_name__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
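# Note (added): with x and y normalised to unit length and theta the angle
# between them, ||x - y|| = 2 * sin(theta / 2), so the chain above evaluates to
# 2 * arcsin(||x - y|| / 2)^2 = theta^2 / 2, i.e. half the squared geodesic
# distance on the unit sphere. For inputs 90 degrees apart the loss is
# (pi / 2)^2 / 2, roughly 1.2337.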
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
for param in model.parameters():
_lowerCAmelCase :List[str] = value
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: Any , _UpperCAmelCase: AutoencoderKL , _UpperCAmelCase: CLIPTextModel , _UpperCAmelCase: CLIPModel , _UpperCAmelCase: CLIPTokenizer , _UpperCAmelCase: UNetaDConditionModel , _UpperCAmelCase: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _UpperCAmelCase: CLIPFeatureExtractor , _UpperCAmelCase: str=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Union[str, Any]=None , ):
super().__init__()
self.register_modules(
vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , clip_model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , coca_model=_UpperCAmelCase , coca_tokenizer=_UpperCAmelCase , coca_transform=_UpperCAmelCase , )
_lowerCAmelCase :int = (
feature_extractor.size
if isinstance(feature_extractor.size , _UpperCAmelCase )
else feature_extractor.size['shortest_edge']
)
_lowerCAmelCase :Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _UpperCAmelCase )
set_requires_grad(self.clip_model , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase :Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
self.enable_attention_slicing(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Dict ):
# get the original timestep using init_timestep
_lowerCAmelCase :Optional[Any] = min(int(num_inference_steps * strength ) , _UpperCAmelCase )
_lowerCAmelCase :List[str] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase :Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Union[str, Any]=None ):
if not isinstance(_UpperCAmelCase , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(_UpperCAmelCase )}""" )
_lowerCAmelCase :Union[str, Any] = image.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :List[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCAmelCase )
]
_lowerCAmelCase :List[str] = torch.cat(_UpperCAmelCase , dim=0 )
else:
_lowerCAmelCase :List[str] = self.vae.encode(_UpperCAmelCase ).latent_dist.sample(_UpperCAmelCase )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :List[Any] = 0.1_8_2_1_5 * init_latents
_lowerCAmelCase :List[Any] = init_latents.repeat_interleave(_UpperCAmelCase , dim=0 )
_lowerCAmelCase :Dict = randn_tensor(init_latents.shape , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
# get latents
_lowerCAmelCase :Dict = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :List[str] = init_latents
return latents
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, Any] ):
_lowerCAmelCase :Optional[int] = self.coca_transform(_UpperCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCAmelCase :Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_lowerCAmelCase :int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] ):
_lowerCAmelCase :Optional[int] = self.feature_extractor.preprocess(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
_lowerCAmelCase :List[str] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Dict = image_embeddings_clip.repeat_interleave(_UpperCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , _UpperCAmelCase: Dict , _UpperCAmelCase: str , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , ):
_lowerCAmelCase :Dict = latents.detach().requires_grad_()
_lowerCAmelCase :Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowerCAmelCase :int = self.scheduler.alphas_cumprod[timestep]
_lowerCAmelCase :Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase :str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCAmelCase :Optional[Any] = torch.sqrt(_UpperCAmelCase )
_lowerCAmelCase :List[str] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Dict = self.scheduler.sigmas[index]
_lowerCAmelCase :Optional[Any] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :Tuple = 1 / 0.1_8_2_1_5 * sample
_lowerCAmelCase :Optional[Any] = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Tuple = transforms.Resize(self.feature_extractor_size )(_UpperCAmelCase )
_lowerCAmelCase :Tuple = self.normalize(_UpperCAmelCase ).to(latents.dtype )
_lowerCAmelCase :List[Any] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Tuple = spherical_dist_loss(_UpperCAmelCase , _UpperCAmelCase ).mean() * clip_guidance_scale
_lowerCAmelCase :str = -torch.autograd.grad(_UpperCAmelCase , _UpperCAmelCase )[0]
if isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Union[str, Any] = latents.detach() + grads * (sigma**2)
_lowerCAmelCase :Dict = noise_pred_original
else:
_lowerCAmelCase :Optional[int] = noise_pred_original - torch.sqrt(_UpperCAmelCase ) * grads
return noise_pred, latents
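# Note (added): `cond_fn` above implements CLIP guidance. It reconstructs an
# approximate x_0 from the current noisy latents (via the alphas_cumprod formula
# for DDIM/PNDM/DPM-Solver schedulers, or `latents - sigma * noise_pred` for
# LMS), decodes it through the VAE, embeds the decoded image with CLIP, and
# backpropagates the spherical distance to the target CLIP embedding into the
# latents. The resulting gradient is folded into the noise prediction, or, for
# LMS-style schedulers, added directly to the latents before the scheduler step.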
@torch.no_grad()
def __call__( self: Optional[int] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: float = 0.6 , _UpperCAmelCase: Optional[int] = 50 , _UpperCAmelCase: Optional[float] = 7.5 , _UpperCAmelCase: Optional[int] = 1 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Optional[float] = 100 , _UpperCAmelCase: Optional[torch.Generator] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , _UpperCAmelCase: float = 0.8 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(_UpperCAmelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(_UpperCAmelCase , torch.Generator ) and batch_size > 1:
_lowerCAmelCase :int = [generator] + [None] * (batch_size - 1)
_lowerCAmelCase :List[Any] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowerCAmelCase :Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowerCAmelCase :List[str] = ', '.join(_UpperCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :List[Any] = self.get_image_description(_UpperCAmelCase )
if style_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :Any = self.get_image_description(_UpperCAmelCase )
# get prompt text embeddings for content and style
_lowerCAmelCase :Any = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :int = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :List[str] = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase :str = text_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# set timesteps
_lowerCAmelCase :Any = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowerCAmelCase :Dict = {}
if accepts_offset:
_lowerCAmelCase :Optional[int] = 1
self.scheduler.set_timesteps(_UpperCAmelCase , **_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , self.device )
_lowerCAmelCase :int = timesteps[:1].repeat(_UpperCAmelCase )
# Preprocess image
_lowerCAmelCase :Dict = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :int = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :Any = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :str = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if clip_guidance_scale > 0:
_lowerCAmelCase :Optional[Any] = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Any = slerp(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase :int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase :Optional[int] = content_text_input.input_ids.shape[-1]
_lowerCAmelCase :Union[str, Any] = self.tokenizer([''] , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='pt' )
_lowerCAmelCase :Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCAmelCase :Optional[int] = uncond_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase :int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase :Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase :Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCAmelCase :Any = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to(
self.device )
else:
_lowerCAmelCase :List[Any] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCAmelCase :int = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase :Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase :Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase :Any = {}
if accepts_eta:
_lowerCAmelCase :Any = eta
# check if the scheduler accepts generator
_lowerCAmelCase :List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowerCAmelCase :List[Any] = generator
with self.progress_bar(total=_UpperCAmelCase ):
for i, t in enumerate(_UpperCAmelCase ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase :Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase :Tuple = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase :List[str] = noise_pred.chunk(2 )
_lowerCAmelCase :Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCAmelCase :List[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.cond_fn(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase :str = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :str = 1 / 0.1_8_2_1_5 * latents
_lowerCAmelCase :Any = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[str] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase :List[Any] = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase ) | 687 | 1 |
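# Usage sketch (added; the custom pipeline id and image variable names are
# assumptions, the keyword names come from the signature above). The pipeline
# blends a content image and a style image by slerping their latents, text
# embeddings and CLIP image embeddings:
#
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained(
#       'runwayml/stable-diffusion-v1-5',
#       custom_pipeline='clip_guided_images_mixing_stable_diffusion',  # assumed id
#   )
#   result = pipe(content_pil, style_pil, content_prompt='a photo',
#                 style_prompt='an oil painting', num_inference_steps=50)
#   result.images[0].save('mixed.png')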
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
a = datasets.logging.get_logger(__name__)
a = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
a = """\
BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
a = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
a = {
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Dict ):
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
'Using default BLEURT-Base checkpoint for sequence maximum length 128. '
'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').' )
_lowerCAmelCase :str = 'bleurt-base-128'
if self.config_name.lower() in CHECKPOINT_URLS:
_lowerCAmelCase :int = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
_lowerCAmelCase :Dict = self.config_name.upper()
else:
raise KeyError(
f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )
# download the model checkpoint specified by self.config_name and set up the scorer
_lowerCAmelCase :Optional[Any] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
_lowerCAmelCase :str = score.BleurtScorer(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] ):
_lowerCAmelCase :Optional[int] = self.scorer.score(references=_UpperCAmelCase , candidates=_UpperCAmelCase )
return {"scores": scores} | 687 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = list(__magic_name__ )
_lowerCAmelCase :Dict = list(__magic_name__ )
_lowerCAmelCase :Any = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != listb[i]:
count += 1
_lowerCAmelCase :Union[str, Any] = '_'
if count > 1:
return False
else:
return "".join(__magic_name__ )
def UpperCamelCase_( __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :int = []
while True:
_lowerCAmelCase :str = ['$'] * len(__magic_name__ )
_lowerCAmelCase :Optional[int] = []
for i in range(len(__magic_name__ ) ):
for j in range(i + 1 , len(__magic_name__ ) ):
_lowerCAmelCase :int = compare_string(binary[i] , binary[j] )
if k is not False:
# a successful merge covers both implicants; the merged term feeds the next round
_lowerCAmelCase :str = '*'
_lowerCAmelCase :Union[str, Any] = '*'
temp.append(k )
for i in range(len(__magic_name__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__magic_name__ ) == 0:
return pi
_lowerCAmelCase :Any = list(set(__magic_name__ ) )
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : Sequence[int] ):
"""simple docstring"""
_lowerCAmelCase :str = []
for minterm in minterms:
_lowerCAmelCase :Any = ''
for _ in range(__magic_name__ ):
_lowerCAmelCase :Tuple = str(minterm % 2 ) + string
minterm //= 2
temp.append(__magic_name__ )
return temp
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = list(__magic_name__ )
_lowerCAmelCase :List[Any] = list(__magic_name__ )
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != listb[i]:
count_n += 1
return count_n == count
def UpperCamelCase_( __magic_name__ : list[list[int]] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = []
_lowerCAmelCase :List[str] = [0] * len(__magic_name__ )
for i in range(len(chart[0] ) ):
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Optional[Any] = -1
for j in range(len(__magic_name__ ) ):
if chart[j][i] == 1:
count += 1
_lowerCAmelCase :List[Any] = j
if count == 1:
_lowerCAmelCase :Dict = 1
for i in range(len(__magic_name__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__magic_name__ ) ):
_lowerCAmelCase :Dict = 0
temp.append(prime_implicants[i] )
while True:
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Any = -1
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = chart[i].count(1 )
if count_n > max_n:
_lowerCAmelCase :Optional[Any] = count_n
_lowerCAmelCase :Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = 0
def UpperCamelCase_( __magic_name__ : list[str] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = [[0 for x in range(len(__magic_name__ ) )] for x in range(len(__magic_name__ ) )]
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :Tuple = prime_implicants[i].count('_' )
for j in range(len(__magic_name__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , __magic_name__ ):
_lowerCAmelCase :str = 1
return chart
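# Worked example (added) for three variables and minterms {1, 5, 7}:
# decimal_to_binary gives ['001', '101', '111']; check() merges 001/101 into
# '_01' and 101/111 into '1_1', so the prime implicants are ['_01', '1_1'].
# prime_implicant_chart() then marks which minterms each implicant covers, and
# selection() keeps the essential implicants needed to cover every minterm
# (here both, since only '_01' covers 001 and only '1_1' covers 111).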
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Tuple = int(input('Enter the no. of variables\n' ) )
_lowerCAmelCase :Tuple = [
int(x )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
_lowerCAmelCase :List[str] = decimal_to_binary(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Any = check(__magic_name__ )
print('Prime Implicants are:' )
print(__magic_name__ )
_lowerCAmelCase :List[Any] = prime_implicant_chart(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = selection(__magic_name__ , __magic_name__ )
print('Essential Prime Implicants are:' )
print(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 687 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 |
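# Note (added): _LazyModule defers the imports declared in _import_structure
# until an attribute is first accessed, so importing the package stays cheap
# even when torch is installed. Assuming this file is
# transformers/models/squeezebert/__init__.py:
#
#   from transformers.models.squeezebert import SqueezeBertModel
#   # the real import of modeling_squeezebert happens only at this lookup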
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
a = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
a = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[int]=4 , _UpperCAmelCase: Optional[int]=False ):
_lowerCAmelCase :Any = compute_bleu(
reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) :Tuple = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
} | 687 | 1 |
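# Note (added): compute_bleu implements standard corpus-level BLEU,
#   BLEU = BP * exp( (1/N) * sum_{n=1..N} log p_n )
# where p_n are the modified n-gram precisions for n up to max_order (N) and
# the brevity penalty is BP = 1 if c > r else exp(1 - r / c), with c the total
# translation length and r the effective reference length. These quantities
# are returned above as 'precisions', 'brevity_penalty', 'translation_length'
# and 'reference_length'.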
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: str=13 , _UpperCAmelCase: List[str]=7 , _UpperCAmelCase: List[Any]=True , _UpperCAmelCase: Dict=True , _UpperCAmelCase: Dict=True , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: List[str]=True , _UpperCAmelCase: Any=False , _UpperCAmelCase: str=False , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: List[Any]=99 , _UpperCAmelCase: Optional[int]=0 , _UpperCAmelCase: Optional[int]=32 , _UpperCAmelCase: int=5 , _UpperCAmelCase: int=4 , _UpperCAmelCase: Dict=0.1 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: List[Any]=512 , _UpperCAmelCase: str=2 , _UpperCAmelCase: Dict=0.0_2 , _UpperCAmelCase: Dict=2 , _UpperCAmelCase: Tuple=4 , _UpperCAmelCase: Any="last" , _UpperCAmelCase: Optional[Any]=True , _UpperCAmelCase: Optional[int]=None , _UpperCAmelCase: int=0 , ):
_lowerCAmelCase :Union[str, Any] = parent
_lowerCAmelCase :str = batch_size
_lowerCAmelCase :Tuple = seq_length
_lowerCAmelCase :Tuple = is_training
_lowerCAmelCase :Union[str, Any] = use_input_lengths
_lowerCAmelCase :Tuple = use_token_type_ids
_lowerCAmelCase :Tuple = use_labels
_lowerCAmelCase :Any = gelu_activation
_lowerCAmelCase :Dict = sinusoidal_embeddings
_lowerCAmelCase :Any = causal
_lowerCAmelCase :Optional[int] = asm
_lowerCAmelCase :List[str] = n_langs
_lowerCAmelCase :str = vocab_size
_lowerCAmelCase :Optional[Any] = n_special
_lowerCAmelCase :Tuple = hidden_size
_lowerCAmelCase :Tuple = num_hidden_layers
_lowerCAmelCase :int = num_attention_heads
_lowerCAmelCase :str = hidden_dropout_prob
_lowerCAmelCase :int = attention_probs_dropout_prob
_lowerCAmelCase :str = max_position_embeddings
_lowerCAmelCase :Optional[Any] = type_sequence_label_size
_lowerCAmelCase :Optional[Any] = initializer_range
_lowerCAmelCase :List[str] = num_labels
_lowerCAmelCase :Tuple = num_choices
_lowerCAmelCase :Any = summary_type
_lowerCAmelCase :List[str] = use_proj
_lowerCAmelCase :str = scope
_lowerCAmelCase :Dict = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase :Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase :str = None
if self.use_input_lengths:
_lowerCAmelCase :Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_lowerCAmelCase :str = None
if self.use_token_type_ids:
_lowerCAmelCase :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_lowerCAmelCase :Union[str, Any] = None
_lowerCAmelCase :Optional[Any] = None
_lowerCAmelCase :Dict = None
if self.use_labels:
_lowerCAmelCase :str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase :str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , 2 ).float()
_lowerCAmelCase :Tuple = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase :List[str] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: str , _UpperCAmelCase: Any , _UpperCAmelCase: Any , _UpperCAmelCase: int , ):
_lowerCAmelCase :Tuple = XLMModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :List[str] = model(_UpperCAmelCase , lengths=_UpperCAmelCase , langs=_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = model(_UpperCAmelCase , langs=_UpperCAmelCase )
_lowerCAmelCase :Tuple = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] , _UpperCAmelCase: int , _UpperCAmelCase: List[str] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Any , _UpperCAmelCase: Any , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Dict , ):
_lowerCAmelCase :List[Any] = XLMWithLMHeadModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Union[str, Any] , ):
_lowerCAmelCase :Tuple = XLMForQuestionAnsweringSimple(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Dict = model(_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
_lowerCAmelCase :Dict = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: int , ):
_lowerCAmelCase :Tuple = XLMForQuestionAnswering(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
_lowerCAmelCase :Tuple = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , p_mask=_UpperCAmelCase , )
_lowerCAmelCase :Optional[Any] = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , )
((_lowerCAmelCase) , ) :List[str] = result_with_labels.to_tuple()
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
((_lowerCAmelCase) , ) :Optional[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: str , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: List[str] , _UpperCAmelCase: Dict , _UpperCAmelCase: List[str] , _UpperCAmelCase: str , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[int] , ):
_lowerCAmelCase :Dict = XLMForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
_lowerCAmelCase :Dict = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: str , _UpperCAmelCase: Any , _UpperCAmelCase: Any , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Dict , _UpperCAmelCase: Tuple , ):
_lowerCAmelCase :Dict = self.num_labels
_lowerCAmelCase :str = XLMForTokenClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: str , _UpperCAmelCase: str , _UpperCAmelCase: Tuple , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: int , ):
_lowerCAmelCase :Optional[Any] = self.num_choices
_lowerCAmelCase :Union[str, Any] = XLMForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase :Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase :List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase :Optional[int] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :int = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) :List[str] = config_and_inputs
_lowerCAmelCase :Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ (snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : str = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase : List[Any] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowerCamelCase : int = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Dict , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Optional[int] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Any , _UpperCAmelCase: str=False ):
_lowerCAmelCase :Any = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_lowerCAmelCase :str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
_lowerCAmelCase :Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :Optional[int] = XLMModelTester(self )
_lowerCAmelCase :Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase , emb_dim=37 )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , _UpperCAmelCase: Any , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: str=False , _UpperCAmelCase: Any=1 ):
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(
[isinstance(_UpperCAmelCase , _UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(_UpperCAmelCase ) )
self.assertEqual(len(_UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_UpperCAmelCase ):
# adds PAD dummy token
_lowerCAmelCase :Tuple = min_length + idx + 1
_lowerCAmelCase :Optional[int] = min_length + idx + 1
_lowerCAmelCase :Tuple = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: str , _UpperCAmelCase: str , _UpperCAmelCase: List[Any]=False , _UpperCAmelCase: List[str]=1 ):
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(
[isinstance(_UpperCAmelCase , _UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(_UpperCAmelCase ) , )
self.assertEqual(len(_UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_UpperCAmelCase ):
# adds PAD dummy token
_lowerCAmelCase :List[str] = min_length + idx + 1
_lowerCAmelCase :Union[str, Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_UpperCAmelCase ) , )
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self: Any ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :int = XLMModel.from_pretrained(model_name )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Optional[Any] = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = torch.tensor([[14, 447]] , dtype=torch.long , device=_UpperCAmelCase ) # the president
_lowerCAmelCase :List[str] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_lowerCAmelCase :Any = model.generate(_UpperCAmelCase , do_sample=_UpperCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _UpperCAmelCase ) | 687 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
a = Mapping[str, np.ndarray]
a = Mapping[str, Any] # Is a nested dict.
a = 0.0_1
@dataclasses.dataclass(frozen=snake_case__ )
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
lowerCamelCase : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
lowerCamelCase : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
lowerCamelCase : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
lowerCamelCase : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
lowerCamelCase : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
lowerCamelCase : Optional[str] = None
# Templates used to generate this protein (prediction-only)
lowerCamelCase : Optional[Sequence[str]] = None
# Chain corresponding to each parent
lowerCamelCase : Optional[Sequence[int]] = None
def UpperCamelCase_( __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :str = r'(\[[A-Z]+\]\n)'
_lowerCAmelCase :List[str] = [tag.strip() for tag in re.split(__magic_name__ , __magic_name__ ) if len(tag ) > 0]
_lowerCAmelCase :Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] )
_lowerCAmelCase :List[str] = ["N", "CA", "C"]
_lowerCAmelCase :Optional[Any] = None
_lowerCAmelCase :str = None
_lowerCAmelCase :Optional[int] = None
for g in groups:
if "[PRIMARY]" == g[0]:
_lowerCAmelCase :Union[str, Any] = g[1][0].strip()
for i in range(len(__magic_name__ ) ):
if seq[i] not in residue_constants.restypes:
_lowerCAmelCase :Optional[int] = 'X' # FIXME: strings are immutable
_lowerCAmelCase :List[str] = np.array(
[residue_constants.restype_order.get(__magic_name__ , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
_lowerCAmelCase :List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__magic_name__ , g[1][axis].split() ) ) )
_lowerCAmelCase :str = np.array(__magic_name__ )
_lowerCAmelCase :str = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__magic_name__ ):
_lowerCAmelCase :List[str] = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
_lowerCAmelCase :List[str] = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) )
_lowerCAmelCase :Tuple = np.zeros(
(
len(__magic_name__ ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__magic_name__ ):
_lowerCAmelCase :List[Any] = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__magic_name__ , atom_mask=__magic_name__ , aatype=__magic_name__ , residue_index=np.arange(len(__magic_name__ ) ) , b_factors=__magic_name__ , )
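# Build the PDB header lines (a REMARK plus a PARENT record) for one chain,
# filtering the parents by `chain_id` when chain indices are available.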
def UpperCamelCase_( __magic_name__ : Protein , __magic_name__ : int = 0 ):
"""simple docstring"""
_lowerCAmelCase :List[str] = []
_lowerCAmelCase :List[Any] = prot.remark
if remark is not None:
pdb_headers.append(f"""REMARK {remark}""" )
_lowerCAmelCase :Union[str, Any] = prot.parents
_lowerCAmelCase :Dict = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
_lowerCAmelCase :int = [p for i, p in zip(__magic_name__ , __magic_name__ ) if i == chain_id]
if parents is None or len(__magic_name__ ) == 0:
_lowerCAmelCase :int = ['N/A']
pdb_headers.append(f"""PARENT {" ".join(__magic_name__ )}""" )
return pdb_headers
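# Weave per-chain headers back into an existing (possibly multi-chain) PDB
# string: the remark goes first, and a fresh PARENT line is emitted after each
# TER record so every chain carries its own parents (falling back to 'N/A').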
def UpperCamelCase_( __magic_name__ : Protein , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :List[str] = []
_lowerCAmelCase :Any = pdb_str.split('\n' )
_lowerCAmelCase :int = prot.remark
if remark is not None:
out_pdb_lines.append(f"""REMARK {remark}""" )
_lowerCAmelCase :List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
_lowerCAmelCase :Optional[Any] = []
if prot.parents_chain_index is not None:
_lowerCAmelCase :Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__magic_name__ ) , [] )
parent_dict[str(__magic_name__ )].append(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = max([int(__magic_name__ ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
_lowerCAmelCase :Dict = parent_dict.get(str(__magic_name__ ) , ['N/A'] )
parents_per_chain.append(__magic_name__ )
else:
parents_per_chain.append(list(prot.parents ) )
else:
_lowerCAmelCase :str = [['N/A']]
def make_parent_line(__magic_name__ : Sequence[str] ) -> str:
return f"""PARENT {" ".join(__magic_name__ )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
_lowerCAmelCase :List[str] = 0
for i, l in enumerate(__magic_name__ ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__magic_name__ )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__magic_name__ ):
_lowerCAmelCase :int = parents_per_chain[chain_counter]
else:
_lowerCAmelCase :Tuple = ['N/A']
out_pdb_lines.append(make_parent_line(__magic_name__ ) )
return "\n".join(__magic_name__ )
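# Render a `Protein` as a PDB-format string: one fixed-width ATOM record per
# unmasked atom, TER records between chains, headers from `get_pdb_headers`,
# and a closing END line.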
def UpperCamelCase_( __magic_name__ : Protein ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = residue_constants.restypes + ['X']
def res_atoa(__magic_name__ : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , 'UNK' )
_lowerCAmelCase :int = residue_constants.atom_types
_lowerCAmelCase :List[str] = []
_lowerCAmelCase :Dict = prot.atom_mask
_lowerCAmelCase :str = prot.aatype
_lowerCAmelCase :Dict = prot.atom_positions
_lowerCAmelCase :Tuple = prot.residue_index.astype(np.intaa )
_lowerCAmelCase :str = prot.b_factors
_lowerCAmelCase :Dict = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('Invalid aatypes.' )
_lowerCAmelCase :Dict = get_pdb_headers(__magic_name__ )
if len(__magic_name__ ) > 0:
pdb_lines.extend(__magic_name__ )
_lowerCAmelCase :Optional[Any] = aatype.shape[0]
_lowerCAmelCase :Any = 1
_lowerCAmelCase :List[str] = 0
_lowerCAmelCase :Optional[Any] = string.ascii_uppercase
_lowerCAmelCase :List[str] = None
# Add all atom sites.
for i in range(__magic_name__ ):
_lowerCAmelCase :Optional[int] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__magic_name__ , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
_lowerCAmelCase :Optional[Any] = 'ATOM'
_lowerCAmelCase :Tuple = atom_name if len(__magic_name__ ) == 4 else f""" {atom_name}"""
_lowerCAmelCase :Union[str, Any] = ''
_lowerCAmelCase :Tuple = ''
_lowerCAmelCase :Optional[Any] = 1.00
_lowerCAmelCase :Dict = atom_name[0] # Protein supports only C, N, O, S, this works.
_lowerCAmelCase :Dict = ''
_lowerCAmelCase :Union[str, Any] = 'A'
if chain_index is not None:
_lowerCAmelCase :List[str] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
_lowerCAmelCase :Optional[int] = (
f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
f"""{res_name_a:>3} {chain_tag:>1}"""
f"""{residue_index[i]:>4}{insertion_code:>1} """
f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
f"""{occupancy:>6.2f}{b_factor:>6.2f} """
f"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__magic_name__ )
atom_index += 1
_lowerCAmelCase :List[Any] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
_lowerCAmelCase :str = True
_lowerCAmelCase :Dict = chain_index[i + 1]
if should_terminate:
# Close the chain.
_lowerCAmelCase :List[str] = 'TER'
_lowerCAmelCase :Tuple = (
f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__magic_name__ )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__magic_name__ , __magic_name__ ) )
pdb_lines.append('END' )
pdb_lines.append('' )
return "\n".join(__magic_name__ )
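# Ideal atom-presence mask implied by residue identity alone (the standard
# atom set for each residue type).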
def UpperCamelCase_( __magic_name__ : Protein ):
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
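# Assemble a `Protein` from model input features and a structure-prediction
# result dict: aatype and residue_index come from the features, atom
# positions/mask from the result, and b-factors default to zeros.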
def UpperCamelCase_( __magic_name__ : FeatureDict , __magic_name__ : ModelOutput , __magic_name__ : Optional[np.ndarray] = None , __magic_name__ : Optional[np.ndarray] = None , __magic_name__ : Optional[str] = None , __magic_name__ : Optional[Sequence[str]] = None , __magic_name__ : Optional[Sequence[int]] = None , ):
"""simple docstring"""
return Protein(
aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=__magic_name__ , remark=__magic_name__ , parents=__magic_name__ , parents_chain_index=__magic_name__ , ) | 687 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self: str , _UpperCAmelCase: str , _UpperCAmelCase: Optional[int]=7 , _UpperCAmelCase: Union[str, Any]=3 , _UpperCAmelCase: int=18 , _UpperCAmelCase: List[Any]=30 , _UpperCAmelCase: List[Any]=400 , _UpperCAmelCase: Optional[Any]=True , _UpperCAmelCase: Any=None , _UpperCAmelCase: Any=True , _UpperCAmelCase: int=None , _UpperCAmelCase: Union[str, Any]=True , ):
_lowerCAmelCase :Tuple = size if size is not None else {'shortest_edge': 20}
_lowerCAmelCase :str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_lowerCAmelCase :str = parent
_lowerCAmelCase :List[Any] = batch_size
_lowerCAmelCase :Optional[Any] = num_channels
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :int = min_resolution
_lowerCAmelCase :List[str] = max_resolution
_lowerCAmelCase :List[str] = do_resize
_lowerCAmelCase :Optional[int] = size
_lowerCAmelCase :str = do_center_crop
_lowerCAmelCase :int = crop_size
_lowerCAmelCase :Optional[int] = do_flip_channel_order
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Any = MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self: str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_flip_channel_order' ) )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_lowerCAmelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self: int ):
# Initialize image_processing
_lowerCAmelCase :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_lowerCAmelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :str = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
# Initialize image_processing
_lowerCAmelCase :int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :List[str] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
# Initialize image_processing
_lowerCAmelCase :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase :int = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , ) | 687 | 1 |
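# Farey-sequence search (cf. Project Euler 71): returns the numerator of the
# largest fraction strictly below numerator/denominator whose denominator does
# not exceed `limit`; solution(3, 7, 1_000_000) should return 428570.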
def UpperCamelCase_( __magic_name__ : int = 3 , __magic_name__ : int = 7 , __magic_name__ : int = 1000000 ):
"""simple docstring"""
_lowerCAmelCase :Tuple = 0
_lowerCAmelCase :Tuple = 1
for current_denominator in range(1 , limit + 1 ):
_lowerCAmelCase :str = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
_lowerCAmelCase :Tuple = current_numerator
_lowerCAmelCase :Dict = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000)) | 687 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
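# Minimal Arrow-based datasets builder: each data file is unpickled with
# pandas and converted to a pyarrow Table, optionally cast to the requested
# features schema.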
@dataclass
class UpperCAmelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
lowerCamelCase : Optional[datasets.Features] = None
class UpperCAmelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCamelCase : Any = PandasConfig
def SCREAMING_SNAKE_CASE__ ( self: int ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: List[str] ):
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowerCAmelCase :Dict = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_UpperCAmelCase , (str, list, tuple) ):
_lowerCAmelCase :Any = data_files
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :List[Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
_lowerCAmelCase :Any = []
for split_name, files in data_files.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :str = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase :Union[str, Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={'files': files} ) )
return splits
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: pa.Table ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase :str = table_cast(_UpperCAmelCase , self.config.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: Dict ):
for i, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase ) ):
with open(_UpperCAmelCase , 'rb' ) as f:
_lowerCAmelCase :Optional[Any] = pa.Table.from_pandas(pd.read_pickle(_UpperCAmelCase ) )
yield i, self._cast_table(_UpperCAmelCase ) | 687 | 1 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
a = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
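# Parse a truthy/falsy command-line string into a bool (accepts yes/no,
# true/false, t/f, y/n, 1/0); real booleans pass through unchanged.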
def UpperCamelCase_( __magic_name__ : Dict ):
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
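# Remap one ResNet block's weights from the original consistency-model
# checkpoint prefix to the corresponding diffusers prefix; `has_skip` also
# copies the skip-connection convolution.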
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any]=False ):
"""simple docstring"""
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
_lowerCAmelCase :Union[str, Any] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
_lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
_lowerCAmelCase :str = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
_lowerCAmelCase :Optional[int] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
_lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
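# Remap an attention block: the fused qkv convolution is chunked into separate
# q/k/v tensors, and the trailing 1x1-conv dimensions are squeezed away so the
# weights fit diffusers' linear attention projections.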
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : List[str]=None ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Tuple = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
_lowerCAmelCase :int = checkpoint[f"""{old_prefix}.norm.weight"""]
_lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.norm.bias"""]
_lowerCAmelCase :Dict = weight_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :str = bias_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[str] = weight_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Optional[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :Tuple = weight_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :List[Any] = bias_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase :int = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
_lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
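# Convert a whole consistency-model checkpoint into a diffusers UNet2DModel
# state dict, walking the time/class embeddings, the down blocks, the
# (hardcoded) mid block, and the up blocks in order.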
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = torch.load(__magic_name__ , map_location='cpu' )
_lowerCAmelCase :List[Any] = {}
_lowerCAmelCase :List[str] = checkpoint['time_embed.0.weight']
_lowerCAmelCase :Tuple = checkpoint['time_embed.0.bias']
_lowerCAmelCase :Dict = checkpoint['time_embed.2.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_lowerCAmelCase :Union[str, Any] = checkpoint['label_emb.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.bias']
_lowerCAmelCase :List[Any] = unet_config['down_block_types']
_lowerCAmelCase :Any = unet_config['layers_per_block']
_lowerCAmelCase :List[Any] = unet_config['attention_head_dim']
_lowerCAmelCase :Tuple = unet_config['block_out_channels']
_lowerCAmelCase :List[str] = 1
_lowerCAmelCase :Optional[int] = channels_list[0]
for i, layer_type in enumerate(__magic_name__ ):
_lowerCAmelCase :Tuple = channels_list[i]
_lowerCAmelCase :Optional[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :int = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[Any] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :int = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase :List[Any] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :List[str] = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Optional[int] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :List[str] = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :Optional[int] = f"""down_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :str = f"""input_blocks.{current_layer}.1"""
_lowerCAmelCase :Optional[Any] = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Union[str, Any] = f"""down_blocks.{i}.downsamplers.0"""
_lowerCAmelCase :Tuple = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
_lowerCAmelCase :Dict = current_channels
# hardcoded the mid-block for now
_lowerCAmelCase :int = 'mid_block.resnets.0'
_lowerCAmelCase :Optional[Any] = 'middle_block.0'
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Optional[int] = 'mid_block.attentions.0'
_lowerCAmelCase :Optional[int] = 'middle_block.1'
_lowerCAmelCase :List[Any] = convert_attention(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Union[str, Any] = 'mid_block.resnets.1'
_lowerCAmelCase :Optional[int] = 'middle_block.2'
_lowerCAmelCase :int = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = 0
_lowerCAmelCase :str = unet_config['up_block_types']
for i, layer_type in enumerate(__magic_name__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Optional[Any] = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :Any = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Any = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer-1}.1"""
_lowerCAmelCase :Tuple = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Tuple = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[str] = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :str = f"""up_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :List[Any] = f"""output_blocks.{current_layer}.1"""
_lowerCAmelCase :int = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Optional[int] = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :int = f"""output_blocks.{current_layer-1}.2"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :str = checkpoint['out.0.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['out.0.bias']
_lowerCAmelCase :List[Any] = checkpoint['out.2.weight']
_lowerCAmelCase :Dict = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
a = parser.parse_args()
a = strabool(args.class_cond)
a = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
a = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
a = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
a = None
a = con_pt_to_diffuser(args.unet_path, unet_config)
a = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
a = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
a = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
a = CMStochasticIterativeScheduler(**scheduler_config)
a = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path) | 687 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
a = """"""
a = """"""
a = """"""
a = 1 # (0 is vertical, 1 is horizontal)
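# Offline flip augmentation for a YOLO-format dataset: every image is flipped,
# its boxes are mirrored, and the results are written out under random
# 32-character file-name suffixes. The empty path constants above are
# placeholders meant to be filled in by the user.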
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = get_dataset(__magic_name__ , __magic_name__ )
print('Processing...' )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = update_image_and_anno(__magic_name__ , __magic_name__ , __magic_name__ )
for index, image in enumerate(__magic_name__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase :Optional[Any] = random_chars(32 )
_lowerCAmelCase :str = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
_lowerCAmelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , __magic_name__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(__magic_name__ )} with {file_name}""" )
_lowerCAmelCase :str = []
for anno in new_annos[index]:
_lowerCAmelCase :List[str] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(__magic_name__ )
with open(f"""/{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
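# Read every `<name>.txt` label file and pair it with `<name>.jpg`, returning
# parallel lists of image paths and per-image box lists; images without boxes
# are skipped.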
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :int = []
_lowerCAmelCase :Union[str, Any] = []
for label_file in glob.glob(os.path.join(__magic_name__ , '*.txt' ) ):
_lowerCAmelCase :Optional[int] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(__magic_name__ ) as in_file:
_lowerCAmelCase :Union[str, Any] = in_file.readlines()
_lowerCAmelCase :List[Any] = os.path.join(__magic_name__ , f"""{label_name}.jpg""" )
_lowerCAmelCase :Tuple = []
for obj_list in obj_lists:
_lowerCAmelCase :Union[str, Any] = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__magic_name__ )
labels.append(__magic_name__ )
return img_paths, labels
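# Flip each image with OpenCV and mirror the matching box coordinate:
# horizontal flips (flip_type == 1) reflect the x-center, vertical flips
# (flip_type == 0) reflect the y-center.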
def UpperCamelCase_( __magic_name__ : list , __magic_name__ : list , __magic_name__ : int = 1 ):
"""simple docstring"""
_lowerCAmelCase :str = []
_lowerCAmelCase :Any = []
_lowerCAmelCase :Optional[Any] = []
for idx in range(len(__magic_name__ ) ):
_lowerCAmelCase :Optional[int] = []
_lowerCAmelCase :Optional[Any] = img_list[idx]
path_list.append(__magic_name__ )
_lowerCAmelCase :List[str] = anno_list[idx]
_lowerCAmelCase :Optional[Any] = cva.imread(__magic_name__ )
if flip_type == 1:
_lowerCAmelCase :List[Any] = cva.flip(__magic_name__ , __magic_name__ )
for bbox in img_annos:
_lowerCAmelCase :List[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_lowerCAmelCase :List[str] = cva.flip(__magic_name__ , __magic_name__ )
for bbox in img_annos:
_lowerCAmelCase :List[str] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__magic_name__ )
new_imgs_list.append(__magic_name__ )
return new_imgs_list, new_annos_lists, path_list
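# Random lowercase-alphanumeric code of length `number_char`, e.g.
# random_chars(8) -> "a3k9x0qz" (hypothetical output).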
def UpperCamelCase_( __magic_name__ : int = 32 ):
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase :str = ascii_lowercase + digits
return "".join(random.choice(__magic_name__ ) for _ in range(__magic_name__ ) )
if __name__ == "__main__":
main()
print("""DONE ✅""") | 687 | 1 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a = logging.get_logger(__name__)
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Optional[int] = 'linear'
lowerCamelCase : Any = 'cosine'
lowerCamelCase : int = 'cosine_with_restarts'
lowerCamelCase : List[str] = 'polynomial'
lowerCamelCase : int = 'constant'
lowerCamelCase : Union[str, Any] = 'constant_with_warmup'
lowerCamelCase : Optional[Any] = 'piecewise_constant'
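# The schedule factories below mirror the `SchedulerType` members above; each
# wraps the optimizer in a torch `LambdaLR` with the appropriate multiplier.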
def UpperCamelCase_( __magic_name__ : Optimizer , __magic_name__ : int = -1 ):
"""simple docstring"""
return LambdaLR(__magic_name__ , lambda __magic_name__ : 1 , last_epoch=__magic_name__ )
def UpperCamelCase_( __magic_name__ : Optimizer , __magic_name__ : int , __magic_name__ : int = -1 ):
"""simple docstring"""
def lr_lambda(__magic_name__ : int ):
if current_step < num_warmup_steps:
return float(__magic_name__ ) / float(max(1.0 , __magic_name__ ) )
return 1.0
return LambdaLR(__magic_name__ , __magic_name__ , last_epoch=__magic_name__ )
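# Piecewise-constant LR schedule driven by a rule string: comma-separated
# "multiplier:step" pairs plus a trailing default multiplier (as in diffusers'
# piecewise-constant schedule).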
def UpperCamelCase_( __magic_name__ : Optimizer , __magic_name__ : str , __magic_name__ : int = -1 ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = {}
_lowerCAmelCase :Tuple = step_rules.split(',' )
for rule_str in rule_list[:-1]:
_lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = rule_str.split(':' )
_lowerCAmelCase :List[Any] = int(__magic_name__ )
_lowerCAmelCase :int = float(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = value
_lowerCAmelCase :Optional[int] = float(rule_list[-1] )
def create_rules_function(__magic_name__ : Any , __magic_name__ : Optional[Any] ):
def rule_func(__magic_name__ : int ) -> float:
_lowerCAmelCase :Tuple = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__magic_name__ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_lowerCAmelCase :Tuple = create_rules_function(__magic_name__ , __magic_name__ )
return LambdaLR(__magic_name__ , __magic_name__ , last_epoch=__magic_name__ )
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : Dict=-1 ):
"""simple docstring"""
def lr_lambda(__magic_name__ : int ):
if current_step < num_warmup_steps:
return float(__magic_name__ ) / float(max(1 , __magic_name__ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__magic_name__ , __magic_name__ , __magic_name__ )
def UpperCamelCase_( __magic_name__ : Optimizer , __magic_name__ : int , __magic_name__ : int , __magic_name__ : float = 0.5 , __magic_name__ : int = -1 ):
"""simple docstring"""
def lr_lambda(__magic_name__ : Any ):
if current_step < num_warmup_steps:
return float(__magic_name__ ) / float(max(1 , __magic_name__ ) )
_lowerCAmelCase :Union[str, Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__magic_name__ ) * 2.0 * progress )) )
return LambdaLR(__magic_name__ , __magic_name__ , __magic_name__ )
def UpperCamelCase_( __magic_name__ : Optimizer , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , __magic_name__ : int = -1 ):
"""simple docstring"""
def lr_lambda(__magic_name__ : str ):
if current_step < num_warmup_steps:
return float(__magic_name__ ) / float(max(1 , __magic_name__ ) )
_lowerCAmelCase :Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__magic_name__ ) * progress) % 1.0) )) )
return LambdaLR(__magic_name__ , __magic_name__ , __magic_name__ )
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : List[Any]=1e-7 , __magic_name__ : Optional[Any]=1.0 , __magic_name__ : Optional[int]=-1 ):
"""simple docstring"""
_lowerCAmelCase :Tuple = optimizer.defaults['lr']
if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )
def lr_lambda(__magic_name__ : int ):
if current_step < num_warmup_steps:
return float(__magic_name__ ) / float(max(1 , __magic_name__ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_lowerCAmelCase :str = lr_init - lr_end
_lowerCAmelCase :str = num_training_steps - num_warmup_steps
_lowerCAmelCase :Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
_lowerCAmelCase :Union[str, Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__magic_name__ , __magic_name__ , __magic_name__ )
a = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
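# Dispatcher: look up the factory for a `SchedulerType` and validate that the
# warmup/training-step arguments it requires were actually provided.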
def UpperCamelCase_( __magic_name__ : Union[str, SchedulerType] , __magic_name__ : Optimizer , __magic_name__ : Optional[str] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[int] = None , __magic_name__ : int = 1 , __magic_name__ : float = 1.0 , __magic_name__ : int = -1 , ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = SchedulerType(__magic_name__ )
_lowerCAmelCase :int = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__magic_name__ , last_epoch=__magic_name__ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__magic_name__ , step_rules=__magic_name__ , last_epoch=__magic_name__ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__magic_name__ , num_warmup_steps=__magic_name__ , last_epoch=__magic_name__ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__magic_name__ , num_warmup_steps=__magic_name__ , num_training_steps=__magic_name__ , num_cycles=__magic_name__ , last_epoch=__magic_name__ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__magic_name__ , num_warmup_steps=__magic_name__ , num_training_steps=__magic_name__ , power=__magic_name__ , last_epoch=__magic_name__ , )
return schedule_func(
__magic_name__ , num_warmup_steps=__magic_name__ , num_training_steps=__magic_name__ , last_epoch=__magic_name__ ) | 687 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
a = logging.get_logger(__name__)
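# Despite the conventional name, this helper returns cosine *similarity*:
# both inputs are L2-normalized, so the matrix product yields cosine scores.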
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = nn.functional.normalize(__magic_name__ )
_lowerCAmelCase :List[str] = nn.functional.normalize(__magic_name__ )
return torch.mm(__magic_name__ , normalized_text_embeds.t() )
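# CLIP-based safety checker: projects pooled CLIP image features and compares
# them against fixed concept embeddings, flagging images whose adjusted scores
# cross the per-concept thresholds.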
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : str = CLIPConfig
lowerCamelCase : Any = ['CLIPEncoderLayer']
def __init__( self: Optional[int] , _UpperCAmelCase: CLIPConfig ):
super().__init__(_UpperCAmelCase )
_lowerCAmelCase :Any = CLIPVisionModel(config.vision_config )
_lowerCAmelCase :Optional[int] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase )
_lowerCAmelCase :int = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :Any = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :str = nn.Parameter(torch.ones(17 ) , requires_grad=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = nn.Parameter(torch.ones(3 ) , requires_grad=_UpperCAmelCase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Dict ):
_lowerCAmelCase :str = self.vision_model(_UpperCAmelCase )[1] # pooled_output
_lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase :Optional[int] = cosine_distance(_UpperCAmelCase , self.special_care_embeds ).cpu().float().numpy()
_lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds ).cpu().float().numpy()
_lowerCAmelCase :str = []
_lowerCAmelCase :List[Any] = image_embeds.shape[0]
for i in range(_UpperCAmelCase ):
_lowerCAmelCase :Optional[Any] = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase :List[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
_lowerCAmelCase :List[Any] = special_cos_dist[i][concept_idx]
_lowerCAmelCase :Dict = self.special_care_embeds_weights[concept_idx].item()
_lowerCAmelCase :List[Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} )
_lowerCAmelCase :Any = 0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
_lowerCAmelCase :Union[str, Any] = cos_dist[i][concept_idx]
_lowerCAmelCase :str = self.concept_embeds_weights[concept_idx].item()
_lowerCAmelCase :str = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_UpperCAmelCase )
result.append(_UpperCAmelCase )
_lowerCAmelCase :Any = [len(res['bad_concepts'] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: torch.FloatTensor , _UpperCAmelCase: torch.FloatTensor ):
_lowerCAmelCase :Optional[int] = self.vision_model(_UpperCAmelCase )[1] # pooled_output
_lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase )
_lowerCAmelCase :Dict = cosine_distance(_UpperCAmelCase , self.special_care_embeds )
_lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase :Any = 0.0
_lowerCAmelCase :Union[str, Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
_lowerCAmelCase :Tuple = torch.any(special_scores > 0 , dim=1 )
_lowerCAmelCase :List[str] = special_care * 0.0_1
_lowerCAmelCase :Any = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
_lowerCAmelCase :Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
_lowerCAmelCase :List[str] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts | 687 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a = 6_3_7_8_1_3_7.0
a = 6_3_5_6_7_5_2.3_1_4_2_4_5
a = 6_378_137
def UpperCamelCase_( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , __magic_name__ : float ):
"""simple docstring"""
_lowerCAmelCase :List[Any] = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
_lowerCAmelCase :Union[str, Any] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) )
_lowerCAmelCase :List[str] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
_lowerCAmelCase :int = haversine_distance(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
_lowerCAmelCase :str = (b_lata + b_lata) / 2
_lowerCAmelCase :Tuple = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
_lowerCAmelCase :str = (sin(__magic_name__ ) ** 2) * (cos(__magic_name__ ) ** 2)
_lowerCAmelCase :Optional[int] = cos(sigma / 2 ) ** 2
    _lowerCAmelCase :List[Any] = (sigma - sin(__magic_name__ )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
_lowerCAmelCase :Dict = (cos(__magic_name__ ) ** 2) * (sin(__magic_name__ ) ** 2)
_lowerCAmelCase :str = sin(sigma / 2 ) ** 2
_lowerCAmelCase :Union[str, Any] = (sigma + sin(__magic_name__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 | 1 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
a = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
a = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print("""\n""".join(upper_files) + """\n""")
a = [file for file in filepaths if """ """ in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print("""\n""".join(space_files) + """\n""")
a = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print("""\n""".join(hyphen_files) + """\n""")
a = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print("""\n""".join(nodir_files) + """\n""")
a = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files) | 687 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
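# Composite configuration holding a nested encoder config and decoder config;
# the classmethod below forces the decoder into cross-attention mode before
# combining the two.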
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Dict = 'encoder-decoder'
lowerCamelCase : Optional[Any] = True
def __init__( self: str , **_UpperCAmelCase: int ):
super().__init__(**_UpperCAmelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
_lowerCAmelCase :Optional[Any] = kwargs.pop('encoder' )
_lowerCAmelCase :Dict = encoder_config.pop('model_type' )
_lowerCAmelCase :str = kwargs.pop('decoder' )
_lowerCAmelCase :str = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
_lowerCAmelCase :str = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Tuple = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Any = True
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls: Tuple , _UpperCAmelCase: PretrainedConfig , _UpperCAmelCase: PretrainedConfig , **_UpperCAmelCase: str ):
logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
_lowerCAmelCase :Dict = True
_lowerCAmelCase :List[str] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowerCAmelCase :Optional[int] = self.encoder.to_dict()
_lowerCAmelCase :Union[str, Any] = self.decoder.to_dict()
_lowerCAmelCase :List[str] = self.__class__.model_type
return output | 687 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
a = get_tests_dir("""fixtures""")
a = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
a = get_tests_dir("""fixtures/dummy-config.json""")
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :int = 0
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :List[Any] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Dict = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase :Union[str, Any] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_lowerCAmelCase :Tuple = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase ).to_dict()
config_dict.pop('feature_extractor_type' )
_lowerCAmelCase :List[str] = WavaVecaFeatureExtractor(**_UpperCAmelCase )
# save in new folder
model_config.save_pretrained(_UpperCAmelCase )
config.save_pretrained(_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase )
# make sure private variable is not incorrectly saved
_lowerCAmelCase :Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Union[str, Any] = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
with self.assertRaisesRegex(
_UpperCAmelCase , 'bert-base is not a local folder and is not a valid model identifier' ):
_lowerCAmelCase :int = AutoFeatureExtractor.from_pretrained('bert-base' )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
with self.assertRaisesRegex(
_UpperCAmelCase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
_lowerCAmelCase :int = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase , revision='aaaaaa' )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
with self.assertRaisesRegex(
_UpperCAmelCase , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
_lowerCAmelCase :Union[str, Any] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_UpperCAmelCase ):
_lowerCAmelCase :Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCAmelCase ):
_lowerCAmelCase :List[str] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_UpperCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def SCREAMING_SNAKE_CASE__ ( self: str ):
try:
AutoConfig.register('custom' , _UpperCAmelCase )
AutoFeatureExtractor.register(_UpperCAmelCase , _UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase ):
AutoFeatureExtractor.register(_UpperCAmelCase , _UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCAmelCase :Tuple = CustomFeatureExtractor.from_pretrained(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_UpperCAmelCase )
_lowerCAmelCase :Any = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self: str ):
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : List[Any] = True
try:
AutoConfig.register('custom' , _UpperCAmelCase )
AutoFeatureExtractor.register(_UpperCAmelCase , _UpperCAmelCase )
# If remote code is not set, the default is to use local
_lowerCAmelCase :Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_lowerCAmelCase :List[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_UpperCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_lowerCAmelCase :Optional[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_UpperCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(_UpperCAmelCase , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ):
_lowerCAmelCase :Optional[int] = parent
_lowerCAmelCase :Dict = batch_size
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :Optional[Any] = patch_size
_lowerCAmelCase :List[Any] = num_channels
_lowerCAmelCase :Optional[int] = embed_dim
_lowerCAmelCase :List[str] = hidden_sizes
_lowerCAmelCase :Union[str, Any] = depths
_lowerCAmelCase :int = num_heads
_lowerCAmelCase :Any = window_size
_lowerCAmelCase :List[Any] = mlp_ratio
_lowerCAmelCase :Optional[int] = qkv_bias
_lowerCAmelCase :Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase :Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase :Dict = drop_path_rate
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :Tuple = use_absolute_embeddings
_lowerCAmelCase :Optional[int] = patch_norm
_lowerCAmelCase :Optional[Any] = layer_norm_eps
_lowerCAmelCase :Union[str, Any] = initializer_range
_lowerCAmelCase :List[str] = is_training
_lowerCAmelCase :str = scope
_lowerCAmelCase :Optional[int] = use_labels
_lowerCAmelCase :List[Any] = type_sequence_label_size
_lowerCAmelCase :Union[str, Any] = encoder_stride
_lowerCAmelCase :Optional[int] = out_features
_lowerCAmelCase :List[str] = out_indices
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase :Dict = None
if self.use_labels:
_lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase :str = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self: int ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ):
_lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCAmelCase :Optional[int] = None
_lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase :List[Any] = 1
_lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :int = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size
_lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase :Optional[int] = 1
_lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs
_lowerCAmelCase :List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
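# Shape sketch for the tester defaults above (illustrative, derived from the arguments):
# image_size=32 with patch_size=2 gives 256 patches; with 3 stages the model downsamples
# twice, so the final sequence length is 256 // 4**2 = 16 and the final hidden size is
# embed_dim * 2**2 = 64, which is what create_and_check_model asserts.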
@require_torch
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : Optional[Any] = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Any = False
lowerCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = FocalNetModelTester(self )
_lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def SCREAMING_SNAKE_CASE__ ( self: str ):
pass
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Tuple = model_class(_UpperCAmelCase )
_lowerCAmelCase :Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase :int = [*signature.parameters.keys()]
_lowerCAmelCase :List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_lowerCAmelCase :List[Any] = outputs.hidden_states
_lowerCAmelCase :str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# FocalNet has a different seq_length
_lowerCAmelCase :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase :List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape
_lowerCAmelCase :Optional[int] = (
reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Dict = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :str = 3
_lowerCAmelCase :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase :int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :List[str] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Union[str, Any] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase :str = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.default_image_processor
_lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase :Dict = model(**_UpperCAmelCase )
# verify the logits
_lowerCAmelCase :str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase : str = FocalNetConfig
lowerCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Any = FocalNetModelTester(self )
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: List[Any] , _UpperCAmelCase: Callable , _UpperCAmelCase: Optional[Features] = None , _UpperCAmelCase: str = None , _UpperCAmelCase: bool = False , _UpperCAmelCase: bool = False , _UpperCAmelCase: Optional[dict] = None , _UpperCAmelCase: Optional[int] = None , **_UpperCAmelCase: Any , ):
super().__init__(
features=_UpperCAmelCase , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase , streaming=_UpperCAmelCase , num_proc=_UpperCAmelCase , **_UpperCAmelCase , )
_lowerCAmelCase :Dict = Generator(
cache_dir=_UpperCAmelCase , features=_UpperCAmelCase , generator=_UpperCAmelCase , gen_kwargs=_UpperCAmelCase , **_UpperCAmelCase , )
def SCREAMING_SNAKE_CASE__ ( self: int ):
# Build iterable dataset
if self.streaming:
_lowerCAmelCase :Optional[int] = self.builder.as_streaming_dataset(split='train' )
# Build regular (map-style) dataset
else:
_lowerCAmelCase :Optional[int] = None
_lowerCAmelCase :Union[str, Any] = None
_lowerCAmelCase :Optional[Any] = None
_lowerCAmelCase :Optional[int] = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase , download_mode=_UpperCAmelCase , verification_mode=_UpperCAmelCase , base_path=_UpperCAmelCase , num_proc=self.num_proc , )
_lowerCAmelCase :int = self.builder.as_dataset(
split='train' , verification_mode=_UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
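# Usage sketch (illustrative; the method above, obfuscated as SCREAMING_SNAKE_CASE__,
# plays the role of AbstractDatasetInputStream.read in the datasets library, and the
# usual entry point is datasets.Dataset.from_generator):
# def gen():
#     yield {"text": "hello"}
# ds = UpperCAmelCase_(gen).SCREAMING_SNAKE_CASE__()  # one-row map-style Dataset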
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
a = HfApi()
a = {}
# fmt: off
a = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
a = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
a = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
a = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
a = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
a = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
a = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
a = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
a = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
a = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
a = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
a = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
a = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
a = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
a = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
a = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
a = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith("""CompVis"""):
a = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
a = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
a = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
a = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
a = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
a = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
lowerCamelCase : Optional[str] = field(default=snake_case__ , metadata={'help': 'A folder containing the training data.'} )
lowerCamelCase : Optional[str] = field(default=snake_case__ , metadata={'help': 'A folder containing the validation data.'} )
lowerCamelCase : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
lowerCamelCase : int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
lowerCamelCase : float = field(
default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = {}
if self.train_dir is not None:
_lowerCAmelCase :Tuple = self.train_dir
if self.validation_dir is not None:
_lowerCAmelCase :List[str] = self.validation_dir
_lowerCAmelCase :List[Any] = data_files if data_files else None
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : str = field(
default=snake_case__ , metadata={
'help': (
'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
'checkpoint identifier on the hub. '
'Don\'t set if you want to train a model from scratch.'
)
} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(snake_case__ )} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCamelCase : str = field(default=snake_case__ , metadata={'help': 'Name or path of preprocessor config.'} )
lowerCamelCase : bool = field(
default=snake_case__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
)
} , )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
)
} , )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={'help': 'Stride to use for the encoder.'} , )
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: str , _UpperCAmelCase: Optional[int]=192 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: Dict=4 , _UpperCAmelCase: int=0.6 ):
_lowerCAmelCase :Tuple = input_size
_lowerCAmelCase :Union[str, Any] = mask_patch_size
_lowerCAmelCase :Optional[Any] = model_patch_size
_lowerCAmelCase :str = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('Input size must be divisible by mask patch size' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('Mask patch size must be divisible by model patch size' )
_lowerCAmelCase :Any = self.input_size // self.mask_patch_size
_lowerCAmelCase :Optional[int] = self.mask_patch_size // self.model_patch_size
_lowerCAmelCase :List[str] = self.rand_size**2
_lowerCAmelCase :Optional[Any] = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self: str ):
_lowerCAmelCase :Tuple = np.random.permutation(self.token_count )[: self.mask_count]
_lowerCAmelCase :List[Any] = np.zeros(self.token_count , dtype=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = 1
_lowerCAmelCase :Optional[int] = mask.reshape((self.rand_size, self.rand_size) )
_lowerCAmelCase :List[str] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
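# Sanity sketch for the defaults above (illustrative only): input_size=192 with
# mask_patch_size=32 yields a 6x6 random grid; model_patch_size=4 upsamples it 8x to
# 48x48, so __call__ returns a flat tensor with 48 * 48 = 2304 entries, roughly 60% ones.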
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Any = torch.stack([example['pixel_values'] for example in examples] )
_lowerCAmelCase :Dict = torch.stack([example['mask'] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main( ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mim' , __magic_name__ , __magic_name__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowerCAmelCase :List[str] = training_args.get_process_log_level()
logger.setLevel(__magic_name__ )
transformers.utils.logging.set_verbosity(__magic_name__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_lowerCAmelCase :int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCAmelCase :int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
_lowerCAmelCase :int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
_lowerCAmelCase :Optional[Any] = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __magic_name__ ) and data_args.train_val_split > 0.0:
_lowerCAmelCase :Dict = ds['train'].train_test_split(data_args.train_val_split )
_lowerCAmelCase :Dict = split['train']
_lowerCAmelCase :Optional[Any] = split['test']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase :str = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
_lowerCAmelCase :Tuple = AutoConfig.from_pretrained(model_args.config_name_or_path , **__magic_name__ )
elif model_args.model_name_or_path:
_lowerCAmelCase :int = AutoConfig.from_pretrained(model_args.model_name_or_path , **__magic_name__ )
else:
_lowerCAmelCase :Optional[int] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(__magic_name__ , 'decoder_type' ):
_lowerCAmelCase :Tuple = 'simmim'
# adapt config
_lowerCAmelCase :Union[str, Any] = model_args.image_size if model_args.image_size is not None else config.image_size
_lowerCAmelCase :Tuple = model_args.patch_size if model_args.patch_size is not None else config.patch_size
_lowerCAmelCase :str = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'image_size': model_args.image_size,
'patch_size': model_args.patch_size,
'encoder_stride': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
_lowerCAmelCase :Optional[Any] = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__magic_name__ )
elif model_args.model_name_or_path:
_lowerCAmelCase :str = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__magic_name__ )
else:
_lowerCAmelCase :Union[str, Any] = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
_lowerCAmelCase :int = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
_lowerCAmelCase :str = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_lowerCAmelCase :Any = AutoModelForMaskedImageModeling.from_config(__magic_name__ )
if training_args.do_train:
_lowerCAmelCase :Optional[Any] = ds['train'].column_names
else:
_lowerCAmelCase :str = ds['validation'].column_names
if data_args.image_column_name is not None:
_lowerCAmelCase :int = data_args.image_column_name
elif "image" in column_names:
_lowerCAmelCase :Union[str, Any] = 'image'
elif "img" in column_names:
_lowerCAmelCase :Optional[Any] = 'img'
else:
_lowerCAmelCase :Dict = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
_lowerCAmelCase :str = Compose(
[
Lambda(lambda img : img.convert('RGB' ) if img.mode != 'RGB' else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
_lowerCAmelCase :List[str] = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(examples ):
examples["pixel_values"] = [transforms(image ) for image in examples[image_column_name]]
examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
_lowerCAmelCase :Dict = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__magic_name__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
_lowerCAmelCase :Optional[Any] = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__magic_name__ )
# Initialize our trainer
_lowerCAmelCase :Any = Trainer(
model=__magic_name__ , args=__magic_name__ , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=__magic_name__ , data_collator=__magic_name__ , )
# Training
if training_args.do_train:
_lowerCAmelCase :Any = None
if training_args.resume_from_checkpoint is not None:
_lowerCAmelCase :List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCAmelCase :List[Any] = last_checkpoint
_lowerCAmelCase :Optional[Any] = trainer.train(resume_from_checkpoint=__magic_name__ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_lowerCAmelCase :Optional[int] = trainer.evaluate()
trainer.log_metrics('eval' , __magic_name__ )
trainer.save_metrics('eval' , __magic_name__ )
# Write model card and (optionally) push to hub
_lowerCAmelCase :List[str] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'masked-image-modeling',
'dataset': data_args.dataset_name,
'tags': ['masked-image-modeling'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__magic_name__ )
else:
trainer.create_model_card(**__magic_name__ )
if __name__ == "__main__":
main()
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Optional[int] = 10
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :str = [1, 2, 3, 4]
_lowerCAmelCase :Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_lowerCAmelCase :Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
_lowerCAmelCase , _lowerCAmelCase :Optional[Any] = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Optional[int] = ''
_lowerCAmelCase , _lowerCAmelCase :str = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Optional[Any] = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
_lowerCAmelCase , _lowerCAmelCase :Optional[int] = process_story(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[int] = ['It was the best of times.']
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Union[str, Any] = torch.tensor([1, 2, 3, 4] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 0 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_lowerCAmelCase :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 23 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 1 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :List[str] = 101
_lowerCAmelCase :Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_lowerCAmelCase :int = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_lowerCAmelCase :List[str] = compute_token_type_ids(_UpperCAmelCase , _UpperCAmelCase )
np.testing.assert_array_equal(_UpperCAmelCase , _UpperCAmelCase )
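# Semantics exercised above, for reference (illustrative example): build_mask(ids, pad_id)
# marks real tokens with 1 and the trailing padding with 0, e.g.
# build_mask(torch.tensor([5, 6, 0, 0]), 0) -> tensor([1, 1, 0, 0]).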
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
random.seed(__magic_name__ )
np.random.seed(__magic_name__ )
torch.manual_seed(__magic_name__ )
torch.cuda.manual_seed_all(__magic_name__ )
# ^^ safe to call this function even if cuda is not available
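# Reproducibility sketch (illustrative): reseeding makes the same draws repeat.
# UpperCamelCase_(0); first = torch.rand(3); UpperCamelCase_(0); assert torch.equal(first, torch.rand(3))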
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: Tuple , _UpperCAmelCase: Iterable[torch.nn.Parameter] , _UpperCAmelCase: float = 0.9_9_9_9 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: int = 0 , _UpperCAmelCase: bool = False , _UpperCAmelCase: Union[float, int] = 1.0 , _UpperCAmelCase: Union[float, int] = 2 / 3 , _UpperCAmelCase: Optional[Any] = None , _UpperCAmelCase: Dict[str, Any] = None , **_UpperCAmelCase: Optional[int] , ):
if isinstance(_UpperCAmelCase , torch.nn.Module ):
_lowerCAmelCase :Optional[Any] = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage`' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase , )
_lowerCAmelCase :Union[str, Any] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCAmelCase :Optional[Any] = True
if kwargs.get('max_value' , _UpperCAmelCase ) is not None:
_lowerCAmelCase :Optional[Any] = 'The `max_value` argument is deprecated. Please use `decay` instead.'
deprecate('max_value' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase )
_lowerCAmelCase :Dict = kwargs['max_value']
if kwargs.get('min_value' , _UpperCAmelCase ) is not None:
_lowerCAmelCase :Union[str, Any] = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
deprecate('min_value' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = kwargs['min_value']
_lowerCAmelCase :Optional[int] = list(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = [p.clone().detach() for p in parameters]
if kwargs.get('device' , _UpperCAmelCase ) is not None:
_lowerCAmelCase :int = 'The `device` argument is deprecated. Please use `to` instead.'
deprecate('device' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase )
self.to(device=kwargs['device'] )
_lowerCAmelCase :Tuple = None
_lowerCAmelCase :List[str] = decay
_lowerCAmelCase :Optional[int] = min_decay
_lowerCAmelCase :Tuple = update_after_step
_lowerCAmelCase :List[Any] = use_ema_warmup
_lowerCAmelCase :Optional[int] = inv_gamma
_lowerCAmelCase :Union[str, Any] = power
_lowerCAmelCase :Any = 0
_lowerCAmelCase :List[str] = None # set in `step()`
_lowerCAmelCase :Any = model_cls
_lowerCAmelCase :Union[str, Any] = model_config
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls: int , _UpperCAmelCase: Any , _UpperCAmelCase: List[Any] ):
_lowerCAmelCase , _lowerCAmelCase :Optional[Any] = model_cls.load_config(_UpperCAmelCase , return_unused_kwargs=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = model_cls.from_pretrained(_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = cls(model.parameters() , model_cls=_UpperCAmelCase , model_config=model.config )
ema_model.load_state_dict(_UpperCAmelCase )
return ema_model
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Optional[Any] ):
if self.model_cls is None:
raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.' )
if self.model_config is None:
raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.' )
_lowerCAmelCase :Optional[int] = self.model_cls.from_config(self.model_config )
_lowerCAmelCase :Tuple = self.state_dict()
state_dict.pop('shadow_params' , _UpperCAmelCase )
model.register_to_config(**_UpperCAmelCase )
self.copy_to(model.parameters() )
model.save_pretrained(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , _UpperCAmelCase: int ):
_lowerCAmelCase :List[str] = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_lowerCAmelCase :Union[str, Any] = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCAmelCase :Optional[int] = (1 + step) / (10 + step)
_lowerCAmelCase :List[Any] = min(_UpperCAmelCase , self.decay )
# make sure decay is not smaller than min_decay
_lowerCAmelCase :List[Any] = max(_UpperCAmelCase , self.min_decay )
return cur_decay_value
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Iterable[torch.nn.Parameter] ):
if isinstance(_UpperCAmelCase , torch.nn.Module ):
_lowerCAmelCase :str = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase , )
_lowerCAmelCase :Optional[int] = parameters.parameters()
_lowerCAmelCase :Tuple = list(_UpperCAmelCase )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCAmelCase :Any = self.get_decay(self.optimization_step )
_lowerCAmelCase :List[str] = decay
_lowerCAmelCase :Any = 1 - decay
_lowerCAmelCase :int = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , _UpperCAmelCase ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCAmelCase :Dict = deepspeed.zero.GatheredParameters(_UpperCAmelCase , modifier_rank=_UpperCAmelCase )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Iterable[torch.nn.Parameter] ):
_lowerCAmelCase :Tuple = list(_UpperCAmelCase )
for s_param, param in zip(self.shadow_params , _UpperCAmelCase ):
param.data.copy_(s_param.to(param.device ).data )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: int=None , _UpperCAmelCase: Any=None ):
_lowerCAmelCase :Optional[Any] = [
p.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase ) if p.is_floating_point() else p.to(device=_UpperCAmelCase )
for p in self.shadow_params
]
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: Iterable[torch.nn.Parameter] ):
_lowerCAmelCase :Union[str, Any] = [param.detach().cpu().clone() for param in parameters]
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , _UpperCAmelCase: Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`' )
for c_param, param in zip(self.temp_stored_params , _UpperCAmelCase ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCAmelCase :Union[str, Any] = None
def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: dict ):
_lowerCAmelCase :Optional[int] = copy.deepcopy(_UpperCAmelCase )
_lowerCAmelCase :str = state_dict.get('decay' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('Decay must be between 0 and 1' )
_lowerCAmelCase :Tuple = state_dict.get('min_decay' , self.min_decay )
if not isinstance(self.min_decay , _UpperCAmelCase ):
raise ValueError('Invalid min_decay' )
_lowerCAmelCase :List[str] = state_dict.get('optimization_step' , self.optimization_step )
if not isinstance(self.optimization_step , _UpperCAmelCase ):
raise ValueError('Invalid optimization_step' )
_lowerCAmelCase :int = state_dict.get('update_after_step' , self.update_after_step )
if not isinstance(self.update_after_step , _UpperCAmelCase ):
raise ValueError('Invalid update_after_step' )
_lowerCAmelCase :Tuple = state_dict.get('use_ema_warmup' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , _UpperCAmelCase ):
raise ValueError('Invalid use_ema_warmup' )
_lowerCAmelCase :int = state_dict.get('inv_gamma' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('Invalid inv_gamma' )
_lowerCAmelCase :str = state_dict.get('power' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('Invalid power' )
_lowerCAmelCase :Any = state_dict.get('shadow_params' , _UpperCAmelCase )
if shadow_params is not None:
_lowerCAmelCase :List[Any] = shadow_params
if not isinstance(self.shadow_params , _UpperCAmelCase ):
raise ValueError('shadow_params must be a list' )
if not all(isinstance(p , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('shadow_params must all be Tensors' )
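# Minimal usage sketch (illustrative; mirrors diffusers.EMAModel, with a toy nn.Linear
# standing in for a real network):
# net = torch.nn.Linear(4, 4)
# ema = UpperCAmelCase_(net.parameters())
# for _ in range(num_steps):          # hypothetical training loop
#     ...                             # optimizer updates net in place
#     ema.step(net.parameters())      # shadow params track the exponential average
# ema.copy_to(net.parameters())       # load the averaged weights for inference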
def perfect( number : int ):
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
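# Quick self-check (illustrative): the first two perfect numbers pass, a deficient one fails.
assert perfect(6) and perfect(28) and not perfect(12)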
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
number = int(input("""Enter number: """).strip())
print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
a = logging.get_logger(__name__)
def UpperCamelCase_( do_eager_mode : bool , use_xla : bool ):
"""simple docstring"""
def run_func(func: Callable ):
@wraps(func )
def run_in_eager_mode(*args , **kwargs ):
return func(*args , **kwargs )
@wraps(func )
@tf.function(experimental_compile=use_xla )
def run_in_graph_mode(*args , **kwargs ):
return func(*args , **kwargs )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    """Build a random tensor of token ids with shape (batch_size, sequence_length)."""
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.')
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.')
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.')
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.')
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int):
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError('Mixed precision is currently not supported.')

        has_model_class_in_config = (
            hasattr(config, 'architectures')
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = 'TF' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('transformers', fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    ' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, 'vocab_size') else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int):
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.')

        if self.args.fp16:
            raise NotImplementedError('Mixed precision is currently not supported.')

        has_model_class_in_config = (
            hasattr(config, 'architectures')
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = 'TF' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('transformers', fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    ' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, 'vocab_size') else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run the model a few extra times to stabilize compilation for tpu
                    logger.info('Do inference on TPU. Running model 5 times to stabilize compilation')
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=10, )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"""Doesn't fit on GPU. {e}""" )
    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            'Note that TensorFlow allocates more memory than '
            'it might need to speed up computation. '
            'The memory reported here corresponds to the memory '
            'reported by `nvidia-smi`, which can vary depending '
            'on total available memory on the GPU that is used.' )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            '`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
                            ' consumption line by line.' )
                    trace = start_memory_tracing('transformers')

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
                        ' with `args.memory=False`' )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            'py3nvml not installed, we won\'t log GPU memory usage. '
                            'Install py3nvml (pip install py3nvml) to log information about GPU.' )
                        memory = 'N/A'
                    else:
                        logger.info(
                            'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
                            ' running on the same GPU.' )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
                            ' TensorFlow.' )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"""Doesn't fit on GPU. {e}""" )
                return "N/A", None
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """A polynomial over a single indeterminate, with coefficients stored from lowest to highest degree."""

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.' )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
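# Illustrative usage of the class above (not part of the original module):
if __name__ == "__main__":
    p = Polynomial(2, [2, 0, 1])  # represents x^2 + 2 (coefficients from lowest degree)
    q = Polynomial(1, [0, 1])  # represents x
    print(p + q)  # 1x^2 + 1x + 2
    print(p.evaluate(3))  # 11
    print(p.derivative())  # 2x
    print(p.integral(constant=1))  # 0.3333333333333333x^3 + 2.0x + 1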
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence: 2, 3, 7, 43, 1807, ..."""
    assert isinstance(number, int), f"""The input value of [n={number}] is not an integer"""

    if number == 1:
        return 2
    elif number < 1:
        msg = f"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''') | 687 |
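    # Illustrative (not part of the original script): each term is num * (num - 1) + 1
    # of its predecessor, so the first five terms are:
    print([sylvester(n) for n in range(1, 6)])  # [2, 3, 7, 43, 1807]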
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
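# Note (illustrative, not part of the original file): with the `_LazyModule` registered in
# `sys.modules`, submodules are imported only on first attribute access, e.g.
#
#     from transformers import GPTNeoConfig        # cheap, config only
#     from transformers import GPTNeoForCausalLM   # triggers the torch-backed import path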
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]
    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"""Padding strategy {global_padding} not supported""" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ['stem'] + [f"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
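# Illustrative usage (not part of the original file):
#
#     config = BitConfig(layer_type="bottleneck", out_features=["stage1", "stage4"])
#     config.stage_names  # -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#
# `out_features` must be a subset of `stage_names`; the helper called at the end of
# `__init__` validates it and aligns it with `out_indices`.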
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of `func` (given as a string in the variable x) via the Newton-Raphson method."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
    # Find root of log(x) - 1 = 0 (i.e. Euler's number e)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''') | 687 | 1 |
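    # One more worked example (illustrative): the positive root of x**2 - 2 is sqrt(2).
    print(F'''The root of x**2 - 2 = 0 is {newton_raphson('x**2 - 2', 1)}''')  # ~1.4142135623730951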
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve the idealised Casimir-force relation for whichever of force, area or distance is 0."""
    if (force, area, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if force < 0:
        raise ValueError('Magnitude of force can not be negative')
    if distance < 0:
        raise ValueError('Distance can not be negative')
    if area < 0:
        raise ValueError('Area can not be negative')
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0')
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 |
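# Worked example (illustrative, not part of the original script): two idealised 1 m^2
# plates separated by 1 micrometre attract with roughly 1.3e-3 N:
#
#     casimir_force(force=0, area=1.0, distance=1e-6)
#     # -> {'force': ~0.0013}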
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
IMAGENET_64_UNET_CONFIG = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
LSUN_256_UNET_CONFIG = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
CD_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
def strabool(v):
    """Parse a truthy/falsy command-line string into a bool (used for --class_cond)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected')
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Copy one resnet block from the original checkpoint layout into the diffusers layout."""
    new_checkpoint[f"""{new_prefix}.norm1.weight"""] = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
    new_checkpoint[f"""{new_prefix}.norm1.bias"""] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
    new_checkpoint[f"""{new_prefix}.conv1.weight"""] = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
    new_checkpoint[f"""{new_prefix}.conv1.bias"""] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
    new_checkpoint[f"""{new_prefix}.time_emb_proj.weight"""] = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
    new_checkpoint[f"""{new_prefix}.time_emb_proj.bias"""] = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
    new_checkpoint[f"""{new_prefix}.norm2.weight"""] = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
    new_checkpoint[f"""{new_prefix}.norm2.bias"""] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
    new_checkpoint[f"""{new_prefix}.conv2.weight"""] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
    new_checkpoint[f"""{new_prefix}.conv2.bias"""] = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]

    if has_skip:
        new_checkpoint[f"""{new_prefix}.conv_shortcut.weight"""] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
        new_checkpoint[f"""{new_prefix}.conv_shortcut.bias"""] = checkpoint[f"""{old_prefix}.skip_connection.bias"""]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    """Split the fused qkv projection of an attention block and copy it into the diffusers layout."""
    weight_q, weight_k, weight_v = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3, dim=0)

    new_checkpoint[f"""{new_prefix}.group_norm.weight"""] = checkpoint[f"""{old_prefix}.norm.weight"""]
    new_checkpoint[f"""{new_prefix}.group_norm.bias"""] = checkpoint[f"""{old_prefix}.norm.bias"""]

    new_checkpoint[f"""{new_prefix}.to_q.weight"""] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_q.bias"""] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_k.weight"""] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_k.bias"""] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_v.weight"""] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_v.bias"""] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"""{new_prefix}.to_out.0.weight"""] = (
        checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"""{new_prefix}.to_out.0.bias"""] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1).squeeze(-1)

    return new_checkpoint
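# Minimal sketch (illustrative only) of the qkv split performed above: the original checkpoint
# stores attention as one fused conv-style projection of shape (3*C, C, 1, 1), which is chunked
# along dim 0 into equal query/key/value blocks and squeezed to 2-D:
#
#     import torch
#     qkv = torch.randn(3 * 8, 8, 1, 1)
#     q, k, v = qkv.chunk(3, dim=0)        # each (8, 8, 1, 1)
#     q = q.squeeze(-1).squeeze(-1)        # (8, 8), i.e. a Linear-style weight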
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    """Convert an original consistency-model UNet checkpoint into the diffusers state-dict layout."""
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    new_checkpoint = {}

    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint['class_embedding.weight'] = checkpoint['label_emb.weight']

    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']

    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"""down_blocks.{i}.resnets.{j}"""
                old_prefix = f"""input_blocks.{current_layer}.0"""
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"""down_blocks.{i}.resnets.{j}"""
                old_prefix = f"""input_blocks.{current_layer}.0"""
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"""down_blocks.{i}.attentions.{j}"""
                old_prefix = f"""input_blocks.{current_layer}.1"""
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"""down_blocks.{i}.downsamplers.0"""
            old_prefix = f"""input_blocks.{current_layer}.0"""
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config['up_block_types']

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"""up_blocks.{i}.resnets.{j}"""
                old_prefix = f"""output_blocks.{current_layer}.0"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"""up_blocks.{i}.upsamplers.0"""
                old_prefix = f"""output_blocks.{current_layer-1}.1"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"""up_blocks.{i}.resnets.{j}"""
                old_prefix = f"""output_blocks.{current_layer}.0"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"""up_blocks.{i}.attentions.{j}"""
                old_prefix = f"""output_blocks.{current_layer}.1"""
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"""up_blocks.{i}.upsamplers.0"""
                old_prefix = f"""output_blocks.{current_layer-1}.2"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
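# Example invocation (illustrative; the script name and paths are placeholders):
#     python convert_consistency_to_diffusers.py \
#         --unet_path cd_imagenet64_l2.pt --dump_path ./consistency-model --class_cond True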
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the old ProphetNet weights into the current model structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True )

    special_keys = ['key_proj', 'value_proj', 'query_proj']

    mapping = {
        'self_attn': 'ngram_self_attn',
        'cross_attn': 'encoder_attn',
        'cross_attn_layer_norm': 'encoder_attn_layer_norm',
        'feed_forward_layer_norm': 'final_layer_norm',
        'feed_forward': '',
        'intermediate': 'fc1',
        'output': 'fc2',
        'key_proj': 'k_proj',
        'query_proj': 'q_proj',
        'value_proj': 'v_proj',
        'word_embeddings': 'embed_tokens',
        'embeddings_layer_norm': 'emb_layer_norm',
        'relative_pos_embeddings': 'relative_linear',
        'ngram_embeddings': 'ngram_input_embed',
        'position_embeddings': 'embed_positions',
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split('.')

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"""{attribute} is initialized.""" )
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"""{attribute} is initialized""" )
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, 'in_proj_weight'):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"""{old_model} does not have {old_attribute}""" )
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"""{key} was not correctly initialized!""" )

    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path) | 687 |
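# Example invocation (illustrative; the script name and paths are placeholders):
#     python convert_prophetnet_checkpoint.py \
#         --prophetnet_checkpoint_path ./prophetnet_old --pytorch_dump_folder_path ./prophetnet_new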
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
a = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, 'schedulers/'))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, 'src/diffusers/schedulers/scheduling_ddpm.py'),
            os.path.join(self.diffusers_dir, 'schedulers/scheduling_ddpm.py'), )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = 'src/diffusers'
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, 'new_code.py')
        with open(fname, 'w', newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, 'r') as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput')
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput', 'DDPMSchedulerOutput', REFERENCE_CODE + '\n', )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput', 'DDPMSchedulerOutput', REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test', 'TestSchedulerOutput', re.sub('DDPM', 'Test', REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""", f"""{long_class_name}SchedulerOutput""", re.sub('Bert', long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test', 'TestSchedulerOutput', REFERENCE_CODE, overwrite_result=re.sub('DDPM', 'Test', REFERENCE_CODE), )
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50_003
PYTHON_CODE = 50_002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes='base', keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes='base', keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ['__java__', '__python__', '__en_XX__', '<mask>'])

        code = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False), code, )
    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes='multi', keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'])
        code = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False), code, )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = 'uclanlp/plbart-python-en_XX'
    src_text = [
        'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
        'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
    ]
    tgt_text = [
        'Returns the maximum value of a b c.',
        'Sums the values of a b c.',
    ]
    expected_src_tokens = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes='base', src_lang='python', tgt_lang='en_XX' )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'], 5_0001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'], 5_0002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'], 5_0003)

    def test_python_en_tokenizer(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__']), [5_0004, 5_0001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='pt')
        batch["decoder_input_ids"] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors='pt', )
        batch["decoder_input_ids"] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='pt')
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors='pt')
        labels = targets['input_ids']
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='java')
        self.assertEqual(
            nested_simplify(inputs), {
                # A, test, EOS, en_XX
                'input_ids': [[150, 242, 2, 5_0003]],
                'attention_mask': [[1, 1, 1, 1]],
                # java
                'forced_bos_token_id': 5_0001,
            }, )
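# To run just these tests (illustrative invocation; the path assumes the usual transformers
# repository layout):
#     pytest tests/models/plbart/test_tokenization_plbart.py -q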
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase : Optional[str] = field(
default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCamelCase : Optional[int] = field(
default=1_00_00 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate for training.'} )
    lowerCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate scheduler type.'} )
lowerCamelCase : Optional[int] = field(
default=7_50 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase : Optional[int] = field(
default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
    lowerCamelCase : Optional[bool] = field(
        default=True , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase : Optional[int] = field(default=5_00_00 , metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCamelCase : Optional[int] = field(
default=10_24 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
    lowerCamelCase : Optional[str] = field(
        default=None , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
    lowerCamelCase : Optional[bool] = field(default=False , metadata={'help': 'If True the data is pretokenized.'} )
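# Illustrative (not part of the original file): argument dataclasses like the one above are
# normally parsed with `transformers.HfArgumentParser`; the class name used here is a
# placeholder, since the snippet does not show one.
#
#     from transformers import HfArgumentParser
#     parser = HfArgumentParser(TrainingArguments)
#     train_args = parser.parse_args_into_dataclasses()[0]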
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
    lowerCamelCase : Optional[int] = field(default=None , metadata={'help': 'Number of workers used for code evaluation.'} )
    lowerCamelCase : Optional[int] = field(
        default=None , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
    lowerCamelCase : Optional[bool] = field(
        default=True , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase : Optional[int] = field(default=2_56 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase : Optional[int] = field(
default=2_00 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase : Optional[str] = field(
default='eval_results.json' , metadata={'help': 'Name of the output file for the evaluation results.'} )
lowerCamelCase : Optional[str] = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on this machine; set to "1" to enable.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
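# Hedged sketch (an addition): how the sampling arguments above typically map onto
# `model.generate()` when producing HumanEval-style completions. `model` and `tokenizer`
# are illustrative assumptions, not names from the original script.
def generate_completions(model, tokenizer, prompt, temperature=0.2, top_k=0, top_p=0.95,
                         max_new_tokens=256, num_return_sequences=10):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        do_sample=True,  # sample from the output distribution instead of greedy decoding
        temperature=temperature,
        top_k=top_k,  # 0 disables top-k filtering
        top_p=top_p,  # nucleus sampling threshold
        max_new_tokens=max_new_tokens,
        num_return_sequences=num_return_sequences,  # parallel generations per prompt
    )
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)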
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot-clean' , metadata={'help': 'Folder to save the processed dataset.'} )
lowerCamelCase : Optional[int] = field(
default=10_00_00 , metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[float] = field(
default=10_00 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1_00 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1.5 , metadata={'help': 'Minimum character-to-token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase : Optional[float] = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
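# Hedged sketch (an addition): one way the filtering thresholds above can be applied to
# a single file. Parameter names are illustrative and the exact heuristics in the
# original preprocessing script may differ.
def passes_quality_filters(content, line_max=1000, line_mean=100, alpha_frac=0.25):
    lines = content.splitlines()
    if not lines:
        return False
    lengths = [len(line) for line in lines]
    if max(lengths) > line_max:  # a single over-long line (minified code, data blobs)
        return False
    if sum(lengths) / len(lengths) > line_mean:  # mean line length too high
        return False
    # Fraction of non-alphanumeric, non-whitespace characters; mostly-symbol files are
    # unlikely to be useful code.
    non_alnum = sum(not (ch.isalnum() or ch.isspace()) for ch in content)
    if non_alnum / max(len(content), 1) > alpha_frac:
        return False
    return True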
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[int] = field(default=20_00_00 , metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCamelCase : Optional[int] = field(
default=3_27_68 , metadata={'help': 'Vocabulary size of the new tokenizer.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
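# Hedged sketch (an addition): training a new tokenizer from the base one configured
# above with `train_new_from_iterator` (a real method on fast `transformers`
# tokenizers). Parameter names and the batching helper are illustrative assumptions.
def train_tokenizer(base_tokenizer="gpt2", dataset_name="transformersbook/codeparrot-train",
                    text_column="content", n_examples=200_000, vocab_size=32_768):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    dataset = load_dataset(dataset_name, split="train", streaming=True)
    tokenizer = AutoTokenizer.from_pretrained(base_tokenizer)

    def batch_iterator(batch_size=1_000):
        batch = []
        for i, example in enumerate(dataset):
            if i >= n_examples:
                break
            batch.append(example[text_column])
            if len(batch) == batch_size:
                yield batch
                batch = []
        if batch:
            yield batch

    return tokenizer.train_new_from_iterator(batch_iterator(), vocab_size=vocab_size)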
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase : Optional[str] = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for pretokenization.'} )
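# Hedged sketch (an addition): pretokenizing the dataset in parallel with
# `datasets.Dataset.map`, using the defaults configured above. Names are illustrative.
def pretokenize(tokenizer_name="codeparrot/codeparrot",
                dataset_name="codeparrot/codeparrot-clean-train", num_workers=None):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
    dataset = load_dataset(dataset_name, split="train")

    def tokenize(examples):
        return {"input_ids": tokenizer(examples["content"])["input_ids"]}

    # `num_proc=None` runs in a single process; an integer fans out across workers.
    return dataset.map(tokenize, batched=True, num_proc=num_workers,
                       remove_columns=dataset.column_names)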
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved model to the hub.'} )
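# Hedged sketch (an addition): initializing a fresh model from the configuration above —
# a `gpt2-large` config resized to the custom tokenizer's vocabulary, with randomly
# initialized weights (`from_config` loads no pretrained checkpoint).
def initialize_model(config_name="gpt2-large", tokenizer_name="codeparrot/codeparrot"):
    from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
    config = AutoConfig.from_pretrained(config_name, vocab_size=len(tokenizer))
    return AutoModelForCausalLM.from_config(config)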