"""Implementation of the SHA-1 hash function, with utilities to hash a string
or the contents of a file from the command line."""
import argparse
import hashlib  # hashlib is only used inside the test function
import struct


class SHA1Hash:
    """Contains the full pipeline of the SHA-1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate the 32-bit integer n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        # Pad the message to a multiple of 64 bytes, appending the original
        # bit-length as a big-endian 64-bit integer.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Expand a 64-byte block into eighty 32-bit words.
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    data = b"Test String"
    assert SHA1Hash(data).final_hash() == hashlib.sha1(data).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
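# Quick sanity check of the class above against hashlib (commented example,
# illustrative input):
#
#     >>> SHA1Hash(b"abc").final_hash()
#     'a9993e364706816aba3e25717850c26c9cd0d89d'
#
# which matches hashlib.sha1(b"abc").hexdigest().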
import inspect
from typing import Optional, Union

import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import PIL_INTERPOLATION, randn_tensor
def preprocess(image, w, h):
    """Convert a PIL image (or list of images) to a normalized torch tensor in [-1, 1]."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between v0 and v1; falls back to linear
    interpolation when the vectors are nearly collinear."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
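# Small numeric sketch of slerp (commented example, illustrative values):
# interpolating halfway between two orthogonal unit vectors yields a unit
# vector at 45 degrees, i.e. the norm is preserved, unlike plain lerp.
#
#     v0 = np.array([1.0, 0.0])
#     v1 = np.array([0.0, 1.0])
#     slerp(0.5, v0, v1)  # ~[0.7071, 0.7071]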
def spherical_dist_loss(x, y):
    """Squared spherical distance between L2-normalized embeddings."""
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    """Stable Diffusion pipeline that mixes a content and a style image under CLIP guidance."""

    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, clip_model: CLIPModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], feature_extractor: CLIPFeatureExtractor, coca_model=None, coca_tokenizer=None, coca_transform=None):
        super().__init__()
        self.register_modules(vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform)
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(self, latents, timestep, index, text_embeddings, noise_pred_original, original_image_embeddings_clip, clip_guidance_scale):
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents, timestep)
        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise, also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)
        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
        grads = -torch.autograd.grad(loss, latents)[0]
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(self, style_image: Union[torch.FloatTensor, PIL.Image.Image], content_image: Union[torch.FloatTensor, PIL.Image.Image], style_prompt: Optional[str] = None, content_prompt: Optional[str] = None, height: Optional[int] = 512, width: Optional[int] = 512, noise_strength: float = 0.6, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, batch_size: Optional[int] = 1, eta: float = 0.0, clip_guidance_scale: Optional[float] = 100, generator: Optional[torch.Generator] = None, output_type: Optional[str] = "pil", return_dict: bool = True, slerp_latent_style_strength: float = 0.8, slerp_prompt_style_strength: float = 0.1, slerp_clip_image_style_strength: float = 0.1):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)
        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)
        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(content_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt")
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
        style_text_input = self.tokenizer(style_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt")
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)
        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)
        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator)
        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator)
        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)
        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(latents, t, i, text_embeddings_for_guidance, noise_pred, clip_image_embeddings, clip_guidance_scale)
                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch-snowflake iteration step `steps` times to the vector list."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each line segment with the four segments of the Koch motif."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
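# Quick numeric check of rotate (commented example): rotating the unit x-vector
# by 90 degrees yields, up to floating-point error, the unit y-vector.
#
#     rotate(numpy.array([1, 0]), 90)  # ~array([0.0, 1.0])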
def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """
    >>> compare_string('0010','0110')
    '0_10'
    >>> compare_string('0110','1101')
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    """Repeatedly merge minterms until only prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
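# Worked example of the bit-extraction loop above (with an integer-valued
# minterm): for no_of_variable = 3 and minterm = 5 the loop prepends
# 5 % 2 = 1, then 2 % 2 = 0, then 1 % 2 = 1, producing the string "101".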
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True if the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select essential prime implicants from the prime implicant chart."""
    temp = []
    select = [0] * len(chart)
    # a minterm covered by only one implicant makes that implicant essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily cover the remaining minterms with the implicant covering the most
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
"""Regression test: FSMT translation quality, measured as BLEU against reference data."""
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
a = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
a = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
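# A minimal sketch of calling the underlying scorer directly, bypassing the
# `datasets` wrapper (commented example; note the argument order, references first):
#
#     score = compute_bleu(
#         reference_corpus=[[["hello", "there"]]],
#         translation_corpus=[["hello", "there"]],
#         max_order=4,
#         smooth=False,
#     )
#     bleu = score[0]  # score is (bleu, precisions, bp, ratio, translation_length, reference_length)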
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
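# A small illustrative sketch (commented out, not from the original file) of
# how the two configs above are typically used together for ONNX export checks:
#
#     config = LevitConfig(image_size=224, hidden_sizes=[128, 256, 384])
#     onnx_config = LevitOnnxConfig(config)
#     onnx_config.inputs               # OrderedDict with the "pixel_values" axes
#     onnx_config.atol_for_validation  # 1e-4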
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowercase__ ( snake_case__ ):
'''simple docstring'''
_UpperCAmelCase = 42
class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1D UNet that takes a noisy sample and a timestep and returns a sample-shaped output."""

    @register_to_config
    def __init__(self, sample_size: int = 65536, sample_rate: Optional[int] = None, in_channels: int = 2, out_channels: int = 2, extra_in_channels: int = 0, time_embedding_type: str = "fourier", flip_sin_to_cos: bool = True, use_timestep_embedding: bool = False, freq_shift: float = 0.0, down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), mid_block_type: str = "UNetMidBlock1D", out_block_type: str = None, block_out_channels: Tuple[int] = (32, 32, 64), act_fn: str = None, norm_num_groups: int = 8, layers_per_block: int = 1, downsample_each_block: bool = False):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift)
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0])

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block)
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block)

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block)
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4)
    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
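# A rough smoke test of the model above (commented example; shapes are
# illustrative and assume the default block types, which halve the length per
# block, so the sample length must divide evenly):
#
#     model = UNet1DModel(sample_size=256, in_channels=2, out_channels=2)
#     sample = torch.randn(1, 2, 256)
#     output = model(sample, timestep=10).sample  # same shape as `sample`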
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
def odd_even_sort(input_list: list) -> list:
    """Sort a list in ascending order using odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
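# Quick check of the function above (commented example, illustrative values):
#
#     >>> odd_even_sort([5, 3, 1, 4, 2])
#     [1, 2, 3, 4, 5]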
if __name__ == "__main__":
print("""Enter list to be sorted""")
a_ = [int(x) for x in input().split()]
# inputing elements of the list in one line
a_ = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
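# A minimal sketch of exercising this builder through the packaged "pandas"
# loader (commented example; the file path is illustrative, any pickled
# DataFrame works):
#
#     import datasets
#     ds = datasets.load_dataset("pandas", data_files={"train": "data/train.pkl"})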
"""Lazy import structure for the RAG (Retrieval-Augmented Generation) models."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Flip images and their YOLO-format bounding boxes for data augmentation."""
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Folder paths and flip direction for the augmentation run
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """
    Get images list and annotations list from input dir.
    Update new images and annotations.
    Save images and annotations in output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    """Collect image paths and YOLO-format label boxes from the given folders."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip every image (and the matching bbox centres) along the requested axis."""
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:  # horizontal flip: mirror the x centre
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip: mirror the y centre
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""") | 687 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 429 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
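# Shape sanity check for cosine_distance (a minimal sketch; the tensors here
# are hypothetical): 2 image embeddings against 17 concept embeddings, both of
# width 768, should give a (2, 17) similarity matrix.
#
#   _img = torch.randn(2, 768)
#   _concepts = torch.randn(17, 768)
#   assert cosine_distance(_img, _concepts).shape == (2, 17)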
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts | 687 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """
    Stochastic sampling from Karras et al. tailored to variance-expanding (VE) models.
    """

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007,
                 s_churn: float = 80, s_min: float = 0.05, s_max: float = 50):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()
    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: random.KeyArray):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float,
             sample_hat: jnp.ndarray, return_dict: bool = True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float,
                     sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray,
                     derivative: jnp.ndarray, return_dict: bool = True):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
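# Minimal usage sketch (the denoising `model` is hypothetical, not part of this
# module): create a state, build the sigma schedule, then alternate
# add_noise_to_input -> model -> step over state.timesteps.
#
#   scheduler = FlaxKarrasVeScheduler()
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)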
| 575 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Calculate the shortest distance along the surface of an ellipsoid between two points."""
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_demonimator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
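# Example (coordinates for illustration): San Francisco (37.774856, -122.424227)
# to New York (40.713019, -74.012647) should come out on the order of 4.1e6 m.
#
#   lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647)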
if __name__ == "__main__":
import doctest
doctest.testmod() | 687 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 338 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    r"""
    Configuration class for a composite encoder-decoder model.
    """

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 687 | 0 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class TestFinetuneRag(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}

        # Write out dummy train/val/test source and target files.
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} --output_dir {output_dir} --model_name_or_path facebook/rag-sequence-base
            --model_type rag_sequence --do_train --do_predict --n_val -1 --val_check_interval 1.0
            --train_batch_size 2 --eval_batch_size 1 --max_source_length 25 --max_target_length 25
            --val_max_target_length 25 --test_max_target_length 25 --label_smoothing 0.1 --dropout 0.1
            --attention_dropout 0.1 --weight_decay 0.001 --adam_epsilon 1e-08 --max_grad_norm 0.1
            --lr_scheduler polynomial --learning_rate 3e-04 --num_train_epochs 1 --warmup_steps 4
            --gradient_accumulation_steps 1 --distributed-port 8787 --use_dummy_dataset 1
            --distributed_retriever {distributed_retriever}
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 600 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2,
        mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True,
        layer_norm_eps=1e-5, initializer_range=0.02, is_training=True, scope=None, use_labels=True,
        type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths,
            num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride, out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase : str = FocalNetConfig
lowerCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Any = FocalNetModelTester(self ) | 687 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
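# Example: get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}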
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, merges_file, bos_token="<s>", eos_token="</s>", sep_token="</s>",
                 cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
                         cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
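# Usage sketch (the file paths below are hypothetical; a real checkpoint would
# normally be loaded with PhobertTokenizer.from_pretrained("vinai/phobert-base")):
#
#   tokenizer = PhobertTokenizer("vocab.txt", "bpe.codes")
#   tokenizer.tokenize("Tôi là sinh_viên")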
| 446 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}
# fmt: off
a = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
a = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
a = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
a = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
a = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
a = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
a = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
a = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
a = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
a = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
a = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
a = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
a = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
a = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
a = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(F'''Started running {mod.modelId}!!!''')
        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''') | 687 | 0 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory,
                         streaming=streaming, num_proc=num_proc, **kwargs)
        self.builder = Generator(cache_dir=cache_dir, features=features, generator=generator,
                                 gen_kwargs=gen_kwargs, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
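# Usage sketch (the generator below is hypothetical, not part of this module):
#
#   def gen():
#       yield {"text": "hello"}
#       yield {"text": "world"}
#
#   ds = GeneratorDatasetInputStream(generator=gen).read()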
| 101 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is already the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is longer than the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            " seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            " favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty collections of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)
    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())
    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected) | 687 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class _lowercase ( unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
    def test_export_tensorflow( self ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , 'tf' , 12 , **model_kwargs )
@require_torch
@slow
    def test_export_pytorch( self ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , 'pt' , 12 , **model_kwargs )
@require_torch
@slow
    def test_export_custom_bert_model( self ):
        from transformers import BertModel
        vocab = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
        with NamedTemporaryFile(mode='w+t' ) as vocab_file:
            vocab_file.write('\n'.join(vocab ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
            model.save_pretrained(bert_save_dir )
            self._test_export(bert_save_dir , 'pt' , 12 , tokenizer )
@require_tf
@slow
    def test_quantize_tf( self ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , 'tf' , 12 , **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
    def test_quantize_pytorch( self ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , 'pt' , 12 , **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model' )
    def _test_export( self , model , framework , opset , tokenizer=None , **model_kwargs ):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath('model.onnx' )
            # Remove the folder if it exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(framework , model , path , opset , tokenizer , **model_kwargs )
            return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
    def test_infer_dynamic_axis_pytorch( self ):
        from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(model , tokenizer , 'pt' )
@require_tf
@require_tokenizers
@slow
    def test_infer_dynamic_axis_tf( self ):
        from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(model , tokenizer , 'tf' )
    def _test_infer_dynamic_axis( self , model , tokenizer , framework ):
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
        self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
    def test_ensure_valid_input( self ):
        input_names = ['input_ids', 'attention_mask', 'token_type_ids']
        tokens = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) , set(input_names ) )
        # Parameters should be reordered according to their respective places in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
        # Generated args are interleaved with other args (for instance the "past" parameter in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , input_names )
        # Should have exactly one arg (everything before the arg that is not provided, "some_other_args")
        self.assertEqual(len(inputs_args ) , 1 )
        self.assertEqual(len(ordered_input_names ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens['input_ids'] )
        self.assertEqual(ordered_input_names[0] , 'input_ids' )
    def test_generate_identified_filename( self ):
        generated = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
        self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
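# A hedged usage sketch of the conversion entry point the tests above exercise;
# the positional order mirrors the convert(...) call in _test_export and may
# vary slightly across transformers versions:
# from pathlib import Path
# from transformers.convert_graph_to_onnx import convert
# convert("pt", "bert-base-cased", Path("/tmp/bert.onnx"), 12)  # framework, model, output path, ONNX opset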
| 56 |
def perfect( number : int ):
    """simple docstring"""
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
    print("""Program to check whether a number is a Perfect number or not...""")
    number = int(input("""Enter number: """).strip())
    print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''') | 687 | 0 |
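# The divisor scan above is O(n). A common O(sqrt(n)) variant pairs each divisor
# d with number // d; sketch only, not part of the original snippet.
def perfect_fast(number: int) -> bool:
    if number < 2:
        return False
    total = 1  # 1 divides every number
    d = 2
    while d * d <= number:
        if number % d == 0:
            total += d
            if d != number // d:  # avoid double-counting a square-root divisor
                total += number // d
        d += 1
    return total == number
# perfect_fast(28) -> True, perfect_fast(27) -> False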
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
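# The _LazyModule assignment above defers the heavy submodule imports until an
# attribute is first accessed. A stripped-down sketch of the idea (the real
# implementation lives in transformers.utils and handles many more edge cases):
import importlib
class LazyModuleSketch:
    def __init__(self, name, import_structure):
        self._name = name
        # Map every public attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, attr):
        # Import the owning submodule lazily, then pull the attribute off it.
        module = importlib.import_module("." + self._attr_to_module[attr], self._name)
        return getattr(module, attr)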
| 485 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """simple docstring"""
    def __init__( self , degree: int , coefficients: MutableSequence[float] ):
        if len(coefficients ) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.' )
        self.coefficients: list[float] = list(coefficients )
        self.degree = degree
    def __add__( self , polynomial_a: Polynomial ):
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , coefficients )
    def __sub__( self , polynomial_a: Polynomial ):
        return self + polynomial_a * Polynomial(0 , [-1] )
    def __neg__( self ):
        return Polynomial(self.degree , [-c for c in self.coefficients] )
    def __mul__( self , polynomial_a: Polynomial ):
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , coefficients )
    def evaluate( self , substitution: int | float ):
        result: int | float = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__( self ):
        polynomial = ''
        for i in range(self.degree , -1 , -1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
        return polynomial
    def __repr__( self ):
        return self.__str__()
    def derivative( self ):
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )
    def integral( self , constant: int | float = 0 ):
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )
    def __eq__( self , polynomial_a: object ):
        if not isinstance(polynomial_a , Polynomial ):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1 ):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__( self , polynomial_a: object ):
        return not self.__eq__(polynomial_a ) | 687 | 0 |
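# A short usage sketch of the Polynomial class above (illustrative values):
p = Polynomial(2, [1, 0, 1])      # 1 + x^2
q = Polynomial(1, [0, 2])         # 2x
print(p + q)                      # 1x^2 + 2x + 1
print((p * q).evaluate(2))        # (2x + 2x^3) at x=2 -> 20
print(p.derivative())             # 2x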
"""simple docstring"""
from typing import Any
def mode( input_list : list ):
    '''simple docstring'''
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
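# Example behaviour of the mode function above: all tied values are returned, sorted.
# print(mode([1, 1, 2, 2, 3]))  # [1, 2]
# print(mode([5]))              # [5]
# print(mode([]))               # []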
| 573 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 | 0 |
from typing import Any
class Node:
    """simple docstring"""
    def __init__( self , data: Any ) -> None:
        '''simple docstring'''
        self.data = data
        self.next = None
class LinkedList:
    """simple docstring"""
    def __init__( self ) -> None:
        '''simple docstring'''
        self.head = None
    def print_list( self ) -> None:
        '''simple docstring'''
        temp = self.head
        while temp is not None:
            print(temp.data , end=" " )
            temp = temp.next
        print()
    def push( self , new_data: Any ) -> None:
        '''simple docstring'''
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node
    def swap_nodes( self , node_data_a: Any , node_data_b: Any ) -> None:
        '''simple docstring'''
        if node_data_a == node_data_b:
            return
        else:
            node_a = self.head
            while node_a is not None and node_a.data != node_data_a:
                node_a = node_a.next
            node_b = self.head
            while node_b is not None and node_b.data != node_data_b:
                node_b = node_b.next
            if node_a is None or node_b is None:
                return
            # Swap the payloads once both nodes have been located.
            node_a.data, node_b.data = node_b.data, node_a.data
if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("""After swapping""")
    ll.print_list()
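# Note: swap_nodes above exchanges the node payloads, which is O(1) once both
# nodes are found; relinking the nodes themselves is the usual alternative when
# payloads are expensive to copy. Expected output of the demo:
# 1 2 3 4 5      (before)
# 4 2 3 1 5      (after swapping 1 and 4)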
| 221 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson( func : str , a : float | Decimal , precision : float = 10**-10 ):
    """simple docstring"""
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision:  # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
# Find Square Root of 5
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''') | 687 | 0 |
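# eval() on a user-supplied string is risky (hence the noqa: S307 markers above).
# A sketch of the same iteration using sympy.lambdify instead of eval; the
# function name is illustrative, not part of the original script.
from sympy import diff, lambdify, symbols, sympify
def newton_raphson_safe(expr: str, start: float, precision: float = 10**-10) -> float:
    x_sym = symbols("x")
    f = lambdify(x_sym, sympify(expr))            # callable f(x)
    f_prime = lambdify(x_sym, diff(sympify(expr), x_sym))  # callable f'(x)
    x = start
    while abs(f(x)) >= precision:
        x = x - f(x) / f_prime(x)
    return x
# newton_raphson_safe("sin(x)", 2) -> approximately pi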
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True )
class Protein:
    """simple docstring"""
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]
    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]
    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]
    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]
    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]
    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None
    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None
    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None
    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string( proteinnet_str : str ) -> Protein:
    """simple docstring"""
    tag_re = r'(\[[A-Z]+\]\n)'
    tags = [tag.strip() for tag in re.split(tag_re , proteinnet_str ) if len(tag ) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] )
    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq ) ):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = 'X'  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol , residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3 ):
                tertiary.append(list(map(float , g[1][axis].split() ) ) )
            tertiary_np = np.array(tertiary )
            atom_positions = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.float32 )
            for i, atom in enumerate(atoms ):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3] )
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) )
            atom_mask = np.zeros(
                (
                    len(mask ),
                    residue_constants.atom_type_num,
                ) ).astype(np.float32 )
            for i, atom in enumerate(atoms ):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=atom_positions , atom_mask=atom_mask , aatype=aatype , residue_index=np.arange(len(aatype ) ) , b_factors=None , )
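# For reference, a toy instance of the bracket-tagged ProteinNet text format the
# parser above consumes (values illustrative; coordinates are in picometers and
# get rescaled by PICO_TO_ANGSTROM):
# [PRIMARY]
# MKTAYIAK
# [TERTIARY]
# <x coordinates, whitespace-separated, 3 atoms (N, CA, C) per residue>
# <y coordinates ...>
# <z coordinates ...>
# [MASK]
# ++++++--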
def get_pdb_headers( prot : Protein , chain_id : int = 0 ) -> List[str]:
    """simple docstring"""
    pdb_headers: List[str] = []
    remark = prot.remark
    if remark is not None:
        pdb_headers.append(F'REMARK {remark}' )
    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index , parents ) if i == chain_id]
    if parents is None or len(parents ) == 0:
        parents = ['N/A']
    pdb_headers.append(F'PARENT {" ".join(parents )}' )
    return pdb_headers
def add_pdb_headers( prot : Protein , pdb_str : str ) -> str:
    """simple docstring"""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split('''\n''' )
    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(F'REMARK {remark}' )
    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents ) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents , prot.parents_chain_index ):
                parent_dict.setdefault(str(i ) , [] )
                parent_dict[str(i )].append(p )
            max_idx = max([int(chain_idx ) for chain_idx in parent_dict] )
            for i in range(max_idx + 1 ):
                chain_parents = parent_dict.get(str(i ) , ['''N/A'''] )
                parents_per_chain.append(chain_parents )
        else:
            parents_per_chain.append(list(prot.parents ) )
    else:
        parents_per_chain = [['N/A']]
    def make_parent_line(p : Sequence[str] ) -> str:
        return F'PARENT {" ".join(p )}'
    out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
    chain_counter = 0
    for i, l in enumerate(lines ):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l )
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain ):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ['N/A']
            out_pdb_lines.append(make_parent_line(chain_parents ) )
    return "\n".join(out_pdb_lines )
def to_pdb( prot : Protein ) -> str:
    """simple docstring"""
    restypes = residue_constants.restypes + ['X']
    def res_1to3(r : int ) -> str:
        return residue_constants.restype_1to3.get(restypes[r] , '''UNK''' )
    atom_types = residue_constants.atom_types
    pdb_lines: List[str] = []
    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32 )
    b_factors = prot.b_factors
    chain_index = prot.chain_index
    if np.any(aatype > residue_constants.restype_num ):
        raise ValueError('''Invalid aatypes.''' )
    headers = get_pdb_headers(prot )
    if len(headers ) > 0:
        pdb_lines.extend(headers )
    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n ):
        res_name_3 = res_1to3(aatype[i] )
        for atom_name, pos, mask, b_factor in zip(atom_types , atom_positions[i] , atom_mask[i] , b_factors[i] ):
            if mask < 0.5:
                continue
            record_type = 'ATOM'
            name = atom_name if len(atom_name ) == 4 else F' {atom_name}'
            alt_loc = ''
            insertion_code = ''
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ''
            chain_tag = 'A'
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            atom_line = (
                F'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
                F'{res_name_3:>3} {chain_tag:>1}'
                F'{residue_index[i]:>4}{insertion_code:>1}   '
                F'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
                F'{occupancy:>6.2f}{b_factor:>6.2f}          '
                F'{element:>2}{charge:>2}'
            )
            pdb_lines.append(atom_line )
            atom_index += 1
        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            chain_end = 'TER'
            chain_termination_line = (
                F'{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'
            )
            pdb_lines.append(chain_termination_line )
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot , prev_chain_index ) )
    pdb_lines.append('''END''' )
    pdb_lines.append('''''' )
    return "\n".join(pdb_lines )
def ideal_atom_mask( prot : Protein ) -> np.ndarray:
    """simple docstring"""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction( features : FeatureDict , result : ModelOutput , b_factors : Optional[np.ndarray] = None , chain_index : Optional[np.ndarray] = None , remark : Optional[str] = None , parents : Optional[Sequence[str]] = None , parents_chain_index : Optional[Sequence[int]] = None , ) -> Protein:
    """simple docstring"""
    return Protein(
        aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=chain_index , remark=remark , parents=parents , parents_chain_index=parents_chain_index , )
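# A hedged end-to-end sketch tying the pieces above together: turn a model
# output into a PDB string. Feature and result keys follow the dictionary
# accesses in from_prediction; inputs here are assumed placeholders.
# prot = from_prediction(features=features, result=result, b_factors=b_factors)
# pdb_string = to_pdb(prot)
# with open("prediction.pdb", "w") as f:
#     f.write(pdb_string)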
| 94 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
a = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
a = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
def strabool( v ):
    """simple docstring"""
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected' )
def convert_resnet( checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ):
    """simple docstring"""
    new_checkpoint[f"""{new_prefix}.norm1.weight"""] = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
    new_checkpoint[f"""{new_prefix}.norm1.bias"""] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
    new_checkpoint[f"""{new_prefix}.conv1.weight"""] = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
    new_checkpoint[f"""{new_prefix}.conv1.bias"""] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
    new_checkpoint[f"""{new_prefix}.time_emb_proj.weight"""] = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
    new_checkpoint[f"""{new_prefix}.time_emb_proj.bias"""] = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
    new_checkpoint[f"""{new_prefix}.norm2.weight"""] = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
    new_checkpoint[f"""{new_prefix}.norm2.bias"""] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
    new_checkpoint[f"""{new_prefix}.conv2.weight"""] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
    new_checkpoint[f"""{new_prefix}.conv2.bias"""] = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
    if has_skip:
        new_checkpoint[f"""{new_prefix}.conv_shortcut.weight"""] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
        new_checkpoint[f"""{new_prefix}.conv_shortcut.bias"""] = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
    return new_checkpoint
def convert_attention( checkpoint , new_checkpoint , old_prefix , new_prefix , attention_dim=None ):
    """simple docstring"""
    weight_q, weight_k, weight_v = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
    bias_q, bias_k, bias_v = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
    new_checkpoint[f"""{new_prefix}.group_norm.weight"""] = checkpoint[f"""{old_prefix}.norm.weight"""]
    new_checkpoint[f"""{new_prefix}.group_norm.bias"""] = checkpoint[f"""{old_prefix}.norm.bias"""]
    new_checkpoint[f"""{new_prefix}.to_q.weight"""] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"""{new_prefix}.to_q.bias"""] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"""{new_prefix}.to_k.weight"""] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"""{new_prefix}.to_k.bias"""] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"""{new_prefix}.to_v.weight"""] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"""{new_prefix}.to_v.bias"""] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"""{new_prefix}.to_out.0.weight"""] = (
        checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[f"""{new_prefix}.to_out.0.bias"""] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = torch.load(__magic_name__ , map_location='cpu' )
_lowerCAmelCase :List[Any] = {}
_lowerCAmelCase :List[str] = checkpoint['time_embed.0.weight']
_lowerCAmelCase :Tuple = checkpoint['time_embed.0.bias']
_lowerCAmelCase :Dict = checkpoint['time_embed.2.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_lowerCAmelCase :Union[str, Any] = checkpoint['label_emb.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.weight']
_lowerCAmelCase :str = checkpoint['input_blocks.0.0.bias']
_lowerCAmelCase :List[Any] = unet_config['down_block_types']
_lowerCAmelCase :Any = unet_config['layers_per_block']
_lowerCAmelCase :List[Any] = unet_config['attention_head_dim']
_lowerCAmelCase :Tuple = unet_config['block_out_channels']
_lowerCAmelCase :List[str] = 1
_lowerCAmelCase :Optional[int] = channels_list[0]
for i, layer_type in enumerate(__magic_name__ ):
_lowerCAmelCase :Tuple = channels_list[i]
_lowerCAmelCase :Optional[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :int = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[Any] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :int = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase :List[Any] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__magic_name__ ):
_lowerCAmelCase :List[str] = f"""down_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Optional[int] = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :List[str] = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :Optional[int] = f"""down_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :str = f"""input_blocks.{current_layer}.1"""
_lowerCAmelCase :Optional[Any] = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Union[str, Any] = f"""down_blocks.{i}.downsamplers.0"""
_lowerCAmelCase :Tuple = f"""input_blocks.{current_layer}.0"""
_lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
_lowerCAmelCase :Dict = current_channels
# hardcoded the mid-block for now
_lowerCAmelCase :int = 'mid_block.resnets.0'
_lowerCAmelCase :Optional[Any] = 'middle_block.0'
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Optional[int] = 'mid_block.attentions.0'
_lowerCAmelCase :Optional[int] = 'middle_block.1'
_lowerCAmelCase :List[Any] = convert_attention(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Union[str, Any] = 'mid_block.resnets.1'
_lowerCAmelCase :Optional[int] = 'middle_block.2'
_lowerCAmelCase :int = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = 0
_lowerCAmelCase :str = unet_config['up_block_types']
for i, layer_type in enumerate(__magic_name__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Optional[Any] = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :Any = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Any = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :Dict = f"""output_blocks.{current_layer-1}.1"""
_lowerCAmelCase :Tuple = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase :Tuple = f"""up_blocks.{i}.resnets.{j}"""
_lowerCAmelCase :List[str] = f"""output_blocks.{current_layer}.0"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ )
_lowerCAmelCase :str = f"""up_blocks.{i}.attentions.{j}"""
_lowerCAmelCase :List[Any] = f"""output_blocks.{current_layer}.1"""
_lowerCAmelCase :int = convert_attention(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
current_layer += 1
if i != len(__magic_name__ ) - 1:
_lowerCAmelCase :Optional[int] = f"""up_blocks.{i}.upsamplers.0"""
_lowerCAmelCase :int = f"""output_blocks.{current_layer-1}.2"""
_lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_lowerCAmelCase :str = checkpoint['out.0.weight']
_lowerCAmelCase :Union[str, Any] = checkpoint['out.0.bias']
_lowerCAmelCase :List[Any] = checkpoint['out.2.weight']
_lowerCAmelCase :Dict = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
a = parser.parse_args()
a = strabool(args.class_cond)
a = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
a = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
a = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
a = None
a = con_pt_to_diffuser(args.unet_path, unet_config)
a = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
a = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
a = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
a = CMStochasticIterativeScheduler(**scheduler_config)
a = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path) | 687 | 0 |
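# Example invocation of the conversion script above (paths and checkpoint name
# are illustrative; the name only needs to contain one of the recognised
# substrings such as "imagenet64" so the matching config is selected):
# python convert_consistency_model.py \
#     --unet_path /path/to/cd_imagenet64_l2.pt \
#     --dump_path ./consistency-model-out \
#     --class_cond True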
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCamelCase : Optional[Any] = logging.getLogger(__name__)
def dummy_dataloaders(a=2 , b=3 , batch_size=16 , n_train_batches : int = 10 , n_valid_batches : int = 2 ):
    '''simple docstring'''
    def get_dataset(n_batches ):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def train(num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    '''simple docstring'''
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random() )  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module ):
    def __init__( self ) -> None:
        '''simple docstring'''
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )
    def forward( self , x ):
        '''simple docstring'''
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowerCAmelCase : Optional[int] = DummyModel()
_lowerCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCAmelCase : int = dummy_dataloaders()
_lowerCAmelCase : List[Any] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCAmelCase , automatic_checkpoint_naming=_UpperCAmelCase )
# Train baseline
_lowerCAmelCase : str = Accelerator(project_config=_UpperCAmelCase )
_lowerCAmelCase : Dict = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowerCAmelCase : Optional[Any] = DummyModel()
_lowerCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCAmelCase : List[str] = dummy_dataloaders()
# Train baseline
_lowerCAmelCase : Dict = Accelerator()
_lowerCAmelCase : Union[str, Any] = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save initial
_lowerCAmelCase : Optional[Any] = os.path.join(_UpperCAmelCase , """initial""" )
accelerator.save_state(_UpperCAmelCase )
(_lowerCAmelCase) : Dict = model.a.item(), model.b.item()
_lowerCAmelCase : Optional[Any] = optimizer.state_dict()
_lowerCAmelCase : Union[str, Any] = train(3 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
(_lowerCAmelCase) : Optional[Any] = model.a.item(), model.b.item()
_lowerCAmelCase : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(42 )
_lowerCAmelCase : Optional[int] = DummyModel()
_lowerCAmelCase : Tuple = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCAmelCase : List[str] = dummy_dataloaders()
_lowerCAmelCase : str = Accelerator()
_lowerCAmelCase : int = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
accelerator.load_state(_UpperCAmelCase )
(_lowerCAmelCase) : Any = model.a.item(), model.b.item()
_lowerCAmelCase : Optional[int] = optimizer.state_dict()
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase : Tuple = train(2 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save everything
_lowerCAmelCase : str = os.path.join(_UpperCAmelCase , """checkpoint""" )
accelerator.save_state(_UpperCAmelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_UpperCAmelCase )
test_rands += train(1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
(_lowerCAmelCase) : Any = model.a.item(), model.b.item()
_lowerCAmelCase : str = optimizer.state_dict()
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowerCAmelCase : Dict = DummyModel()
_lowerCAmelCase : Dict = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCAmelCase : Optional[Any] = dummy_dataloaders()
_lowerCAmelCase : Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=_UpperCAmelCase )
# Train baseline
_lowerCAmelCase : Optional[int] = Accelerator(project_dir=_UpperCAmelCase , project_config=_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save initial
accelerator.save_state()
(_lowerCAmelCase) : Any = model.a.item(), model.b.item()
_lowerCAmelCase : Any = optimizer.state_dict()
_lowerCAmelCase : str = train(3 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
(_lowerCAmelCase) : Optional[int] = model.a.item(), model.b.item()
_lowerCAmelCase : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
_lowerCAmelCase : List[Any] = DummyModel()
_lowerCAmelCase : Dict = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCAmelCase : List[str] = dummy_dataloaders()
_lowerCAmelCase : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = Accelerator(project_dir=_UpperCAmelCase , project_config=_UpperCAmelCase )
_lowerCAmelCase : Tuple = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
accelerator.load_state(os.path.join(_UpperCAmelCase , """checkpoints""" , """checkpoint_0""" ) )
(_lowerCAmelCase) : Optional[int] = model.a.item(), model.b.item()
_lowerCAmelCase : str = optimizer.state_dict()
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase : Any = train(2 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCAmelCase , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
(_lowerCAmelCase) : List[str] = model.a.item(), model.b.item()
_lowerCAmelCase : str = optimizer.state_dict()
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = torch.tensor([1, 2, 3] )
_lowerCAmelCase : Any = torch.tensor([2, 3, 4] )
_lowerCAmelCase : int = DummyModel()
_lowerCAmelCase : int = torch.optim.Adam(net.parameters() )
_lowerCAmelCase : Optional[Any] = Accelerator()
with self.assertRaises(_UpperCAmelCase ) as ve:
accelerator.register_for_checkpointing(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase : Union[str, Any] = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowerCAmelCase : List[str] = DummyModel()
_lowerCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCAmelCase : Any = torch.optim.lr_scheduler.StepLR(_UpperCAmelCase , step_size=1 , gamma=0.99 )
_lowerCAmelCase : List[str] = dummy_dataloaders()
_lowerCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=_UpperCAmelCase )
# Train baseline
_lowerCAmelCase : Union[str, Any] = Accelerator(project_dir=_UpperCAmelCase , project_config=_UpperCAmelCase )
_lowerCAmelCase : List[str] = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save initial
accelerator.save_state()
_lowerCAmelCase : List[str] = scheduler.state_dict()
train(3 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCAmelCase , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(_UpperCAmelCase , scheduler.state_dict() )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowerCAmelCase : Optional[int] = DummyModel()
_lowerCAmelCase : int = ProjectConfiguration(automatic_checkpoint_naming=_UpperCAmelCase , total_limit=2 )
# Train baseline
_lowerCAmelCase : Optional[Any] = Accelerator(project_dir=_UpperCAmelCase , project_config=_UpperCAmelCase )
_lowerCAmelCase : Optional[int] = accelerator.prepare(_UpperCAmelCase )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_UpperCAmelCase , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_lowerCAmelCase : Tuple = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
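# The tests above reduce to this save/restore contract; a minimal sketch,
# assuming automatic checkpoint naming as configured in the tests:
# accelerator = Accelerator(project_dir="out", project_config=ProjectConfiguration(automatic_checkpoint_naming=True))
# model, optimizer = accelerator.prepare(model, optimizer)
# accelerator.save_state()                                                     # writes out/checkpoints/checkpoint_0
# accelerator.load_state(os.path.join("out", "checkpoints", "checkpoint_0"))   # restores model/optimizer state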
| 429 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class CopyCheckTester(unittest.TestCase ):
    """simple docstring"""
    def setUp( self ):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
    def tearDown( self ):
        check_copies.DIFFUSERS_PATH = 'src/diffusers'
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ):
        code = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , 'new_code.py' )
        with open(fname , 'w' , newline='\n' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , 'r' ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_diffusers( self ):
        code = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
        self.assertEqual(code , REFERENCE_CODE )
    def test_is_copy_consistent( self ):
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , REFERENCE_CODE , overwrite_result=re.sub('DDPM' , 'Test' , REFERENCE_CODE ) , ) | 687 | 0 |
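# The checker exercised above keys off comments of this exact shape; an
# illustrative instance of the convention (class body elided):
# # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
# class TestSchedulerOutput:
#     ...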
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__A : List[str] = logging.get_logger(__name__)
__A : Optional[int] = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
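# Added usage sketch (illustrative, not part of the original file): loading an
# ONNX export through the wrapper above and running a single inference. The
# repo id and the input name below are placeholders/assumptions, not real artifacts.
if __name__ == "__main__":
    model = OnnxRuntimeModel.from_pretrained(
        "some-org/some-onnx-model",  # hypothetical Hub repo or local directory
        provider="CPUExecutionProvider",
    )
    sample = np.zeros((1, 3, 224, 224), dtype=np.float32)
    print(model(input=sample))  # keyword must match the ONNX graph's input name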
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase : Optional[str] = field(
        default='./' , metadata={'help': 'Save dir where the model repo is cloned and model updates are saved to.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCamelCase : Optional[int] = field(
default=1_00_00 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate for training.'} )
    lowerCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate scheduler type.'} )
lowerCamelCase : Optional[int] = field(
default=7_50 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase : Optional[int] = field(
default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase : Optional[int] = field(default=5_00_00 , metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCamelCase : Optional[int] = field(
        default=10_24 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes, not training steps.'} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class EvaluationArguments:
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class HumanEvalArguments:
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase : Optional[int] = field(default=2_56 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase : Optional[int] = field(
default=2_00 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase : Optional[str] = field(
        default='eval_results.json' , metadata={'help': 'File in which to save the evaluation results.'} )
lowerCamelCase : Optional[str] = field(
        default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on this machine.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class PreprocessingArguments:
"""simple docstring"""
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase : Optional[str] = field(
        default='codeparrot-clean' , metadata={'help': 'Folder to save the processed dataset.'} )
lowerCamelCase : Optional[int] = field(
default=10_00_00 , metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[float] = field(
default=10_00 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1_00 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase : Optional[float] = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class TokenizerTrainingArguments:
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[int] = field(default=20_00_00 , metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCamelCase : Optional[int] = field(
        default=3_27_68 , metadata={'help': 'Vocabulary size of the new tokenizer.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class PretokenizationArguments:
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase : Optional[str] = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
@dataclass
class InitializationArguments:
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} ) | 687 | 0 |
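# Added usage sketch (illustrative): in the original CodeParrot scripts these
# dataclasses are consumed through transformers.HfArgumentParser, roughly as
# below. The individual field names are still obfuscated in this dump, so
# treat this as the intended pattern rather than a faithful CLI.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(TrainingArguments)
    (train_args,) = parser.parse_args_into_dataclasses()
    print(train_args)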
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` via trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    i = 2  # current trial divisor
    ans = 0  # largest prime factor found so far
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next divisor of n
        while n % i != 0:
            i += 1
        ans = i
        # strip out every power of i so that later divisors are prime
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f'{solution() = }')
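# Added sanity check (illustrative): 13195 = 5 * 7 * 13 * 29, so the largest
# prime factor of 13195 must be 29.
assert solution(13195) == 29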
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, xa: int) -> list:
    """Neville's iterated interpolation, evaluated at the point ``xa``."""
    n = len(x_points)
    # q[j][i] holds the value at xa of the polynomial through points j-i+1 .. j
    q = [[0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
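# Added example (illustrative): interpolating the quadratic y = x**2 through
# (1, 1), (2, 4), (3, 9), (4, 16) and evaluating at x = 5 gives exactly 25,
# since polynomial interpolation reproduces polynomials of lower degree.
if __name__ == "__main__":
    value, table = neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 5)
    print(value)  # 25.0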
'''simple docstring'''
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the gaussian function to every element of the matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Create a gaussian kernel of the given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    imga = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # intensity differences relative to the window centre
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            imga[i, j] = val
    return imga


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)
    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
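# Added check (illustrative): a constant image passes through the bilateral
# filter unchanged in the interior, because every neighbourhood has zero
# intensity difference; the untouched border stays at the initial 0.
if __name__ == "__main__":
    flat = np.full((16, 16), 0.5, dtype="float32")
    filtered = bilateral_filter(flat, 1.0, 1.0, 5)
    assert np.allclose(filtered[2:-2, 2:-2], 0.5)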
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
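# Added example (illustrative): the attribute_map above aliases the generic
# config attribute names onto the Swin2SR-specific ones.
if __name__ == "__main__":
    config = Swin2SRConfig(upscale=4)
    print(config.hidden_size)        # 180 -> aliased to embed_dim
    print(config.num_hidden_layers)  # 6   -> aliased to num_layers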
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
__UpperCamelCase = AlbertTokenizer
__UpperCamelCase = AlbertTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
a__ : Any = AlbertTokenizer(A__ )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : List[Any] , A__ : Dict ) -> Optional[int]:
'''simple docstring'''
a__ : Any = '''this is a test'''
a__ : Union[str, Any] = '''this is a test'''
return input_text, output_text
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
a__ : Any = '''<pad>'''
a__ : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A__ ) , A__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A__ ) , A__ )
def __lowerCAmelCase ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
a__ : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''▁eloquent''' )
self.assertEqual(len(A__ ) , 3_0_0_0_0 )
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def __lowerCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__ : Dict = self.get_tokenizer()
a__ : Optional[int] = self.get_rust_tokenizer()
a__ : Tuple = '''I was born in 92000, and this is falsé.'''
a__ : List[str] = tokenizer.tokenize(A__ )
a__ : Union[str, Any] = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
a__ : List[Any] = tokenizer.encode(A__ , add_special_tokens=A__ )
a__ : int = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
a__ : Any = self.get_rust_tokenizer()
a__ : str = tokenizer.encode(A__ )
a__ : int = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
def __lowerCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
a__ : Any = AlbertTokenizer(A__ , keep_accents=A__ )
a__ : Tuple = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(A__ , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [4_8, 2_5, 2_1, 1_2_8_9] )
a__ : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
A__ , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] )
a__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(A__ )
self.assertListEqual(A__ , [3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] )
a__ : Optional[Any] = tokenizer.convert_ids_to_tokens(A__ )
self.assertListEqual(
A__ , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , )
def __lowerCAmelCase ( self : Optional[Any] ) -> Any:
'''simple docstring'''
a__ : Any = AlbertTokenizer(A__ )
a__ : int = tokenizer.encode('''sequence builders''' )
a__ : Any = tokenizer.encode('''multi-sequence build''' )
a__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(A__ )
a__ : Tuple = tokenizer.build_inputs_with_special_tokens(A__ , A__ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def __lowerCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
a__ : Union[str, Any] = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A__ , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
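# Added example (illustrative): the round trip these tests exercise, against
# the public albert-base-v2 checkpoint instead of the local spiece fixture
# (requires network access).
if __name__ == "__main__":
    tok = AlbertTokenizer.from_pretrained("albert-base-v2")
    print(tok.tokenize("This is a test"))  # ['▁this', '▁is', '▁a', '▁test']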
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
a__ : int = 0
def __lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : List[Any] = Path(A__ ) / '''preprocessor_config.json'''
a__ : List[Any] = Path(A__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Any = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : int = Path(A__ ) / '''preprocessor_config.json'''
a__ : Optional[Any] = Path(A__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Dict = CLIPConfig()
            # Create a dummy config file with image_processor_type
a__ : int = Path(A__ ) / '''preprocessor_config.json'''
a__ : Optional[int] = Path(A__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
a__ : List[Any] = AutoImageProcessor.from_pretrained(A__ ).to_dict()
config_dict.pop('''image_processor_type''' )
a__ : Union[str, Any] = CLIPImageProcessor(**A__ )
# save in new folder
model_config.save_pretrained(A__ )
config.save_pretrained(A__ )
a__ : Union[str, Any] = AutoImageProcessor.from_pretrained(A__ )
# make sure private variable is not incorrectly saved
a__ : Optional[Any] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Optional[int] = Path(A__ ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
a__ : Any = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
A__ , '''clip-base is not a local folder and is not a valid model identifier''' ):
a__ : str = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
'''simple docstring'''
with self.assertRaisesRegex(
A__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ , revision='''aaaaaa''' )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
A__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
a__ : Union[str, Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
with self.assertRaises(A__ ):
a__ : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A__ ):
a__ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
a__ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A__ )
a__ : str = AutoImageProcessor.from_pretrained(A__ , trust_remote_code=A__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , A__ )
AutoImageProcessor.register(A__ , A__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A__ ):
AutoImageProcessor.register(A__ , A__ )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Optional[int] = Path(A__ ) / '''preprocessor_config.json'''
a__ : List[str] = Path(A__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Tuple = CustomImageProcessor.from_pretrained(A__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A__ )
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
'''simple docstring'''
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True
try:
AutoConfig.register('''custom''' , A__ )
AutoImageProcessor.register(A__ , A__ )
# If remote code is not set, the default is to use local
a__ : Dict = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
a__ : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
a__ : Optional[int] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(A__ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
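# Added example (illustrative): the resolution path the first test covers --
# AutoImageProcessor picks the concrete processor class from the checkpoint.
if __name__ == "__main__":
    processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
    print(type(processor).__name__)  # CLIPImageProcessor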
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
"""simple docstring"""
__UpperCamelCase = MBartConfig
__UpperCamelCase = {}
__UpperCamelCase = "gelu"
def __init__( self : int , A__ : Any , A__ : Optional[int]=1_3 , A__ : Optional[int]=7 , A__ : Any=True , A__ : Dict=False , A__ : Dict=9_9 , A__ : List[Any]=3_2 , A__ : str=2 , A__ : str=4 , A__ : Union[str, Any]=3_7 , A__ : Union[str, Any]=0.1 , A__ : Tuple=0.1 , A__ : Dict=2_0 , A__ : Optional[int]=2 , A__ : Optional[Any]=1 , A__ : Optional[int]=0 , ) -> Dict:
'''simple docstring'''
a__ : Dict = parent
a__ : Any = batch_size
a__ : Any = seq_length
a__ : Any = is_training
a__ : Optional[int] = use_labels
a__ : Dict = vocab_size
a__ : Optional[int] = hidden_size
a__ : Any = num_hidden_layers
a__ : Optional[int] = num_attention_heads
a__ : List[Any] = intermediate_size
a__ : Optional[Any] = hidden_dropout_prob
a__ : List[Any] = attention_probs_dropout_prob
a__ : Union[str, Any] = max_position_embeddings
a__ : Any = eos_token_id
a__ : str = pad_token_id
a__ : List[str] = bos_token_id
def __lowerCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
a__ : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
a__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
a__ : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
a__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
a__ : str = prepare_mbart_inputs_dict(A__ , A__ , A__ )
return config, inputs_dict
def __lowerCAmelCase ( self : Tuple , A__ : str , A__ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
a__ : str = TFMBartModel(config=A__ ).get_decoder()
a__ : Dict = inputs_dict['''input_ids''']
a__ : List[str] = input_ids[:1, :]
a__ : Dict = inputs_dict['''attention_mask'''][:1, :]
a__ : Any = inputs_dict['''head_mask''']
a__ : List[str] = 1
# first forward pass
a__ : Optional[Any] = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
a__ , a__ : str = outputs.to_tuple()
a__ : Optional[int] = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
__UpperCamelCase = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
__UpperCamelCase = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
__UpperCamelCase = (
{
"conversational": TFMBartForConditionalGeneration,
"feature-extraction": TFMBartModel,
"summarization": TFMBartForConditionalGeneration,
"text2text-generation": TFMBartForConditionalGeneration,
"translation": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
def __lowerCAmelCase ( self : List[str] , A__ : Optional[int] , A__ : Union[str, Any] , A__ : Dict , A__ : Union[str, Any] , A__ : Any ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def __lowerCAmelCase ( self : str ) -> int:
'''simple docstring'''
a__ : Union[str, Any] = TFMBartModelTester(self )
a__ : List[Any] = ConfigTester(self , config_class=A__ )
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
'''simple docstring'''
a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = [
" UN Chief Says There Is No Military Solution in Syria",
]
__UpperCamelCase = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
]
__UpperCamelCase = "facebook/mbart-large-en-ro"
@cached_property
def __lowerCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __lowerCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
return model
def __lowerCAmelCase ( self : List[str] , **A__ : Any ) -> Tuple:
'''simple docstring'''
a__ : Dict = self.translate_src_text(**A__ )
self.assertListEqual(self.expected_text , A__ )
def __lowerCAmelCase ( self : int , **A__ : str ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[int] = self.tokenizer(self.src_text , **A__ , return_tensors='''tf''' )
a__ : List[Any] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
a__ : List[str] = self.tokenizer.batch_decode(A__ , skip_special_tokens=A__ )
return generated_words
@slow
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
'''simple docstring'''
self._assert_generated_batch_equal_expected()
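# Added example (illustrative): the EN->RO path the integration test checks,
# condensed into a standalone snippet. Requires TensorFlow and network access.
if __name__ == "__main__":
    tok = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
    mbart = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
    batch = tok([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
    generated = mbart.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
    print(tok.batch_decode(generated, skip_special_tokens=True))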
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__SCREAMING_SNAKE_CASE = get_logger(__name__)
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCamelCase = "dummy_data"
__UpperCamelCase = "datasets"
__UpperCamelCase = False
def __init__( self : Any , A__ : str , A__ : str , A__ : Union[Version, str] , A__ : Optional[str] = None , A__ : bool = False , A__ : bool = True , A__ : Optional[List[Callable]] = None , ) -> int:
'''simple docstring'''
a__ : Tuple = 0
a__ : Any = dataset_name
a__ : int = cache_dir
a__ : str = use_local_dummy_data
a__ : List[str] = config
# download_callbacks take a single url as input
a__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
a__ : str = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
a__ : Optional[Any] = str(A__ )
# to be downloaded
a__ : Tuple = None
a__ : Tuple = None
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if self._dummy_file is None:
a__ : Dict = self.download_dummy_data()
return self._dummy_file
@property
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
a__ : int = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
a__ : str = cached_path(
A__ , cache_dir=self.cache_dir , extract_compressed_file=A__ , force_extract=A__ )
return os.path.join(A__ , self.dummy_file_name )
@property
def __lowerCAmelCase ( self : int ) -> Optional[int]:
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self._bucket_url is None:
a__ : int = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : Optional[int] , *A__ : int ) -> Union[str, Any]:
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
a__ : Tuple = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
a__ : Union[str, Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(A__ , A__ ):
return self.create_dummy_data_dict(A__ , A__ )
elif isinstance(A__ , (list, tuple) ):
return self.create_dummy_data_list(A__ , A__ )
else:
return self.create_dummy_data_single(A__ , A__ )
def __lowerCAmelCase ( self : List[str] , A__ : Any , *A__ : int ) -> Any:
'''simple docstring'''
return self.download_and_extract(A__ )
def __lowerCAmelCase ( self : Any , A__ : Optional[int] , A__ : Optional[Any] ) -> int:
'''simple docstring'''
return self.download_and_extract(A__ )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : int , *A__ : List[Any] , **A__ : str ) -> Optional[Any]:
'''simple docstring'''
return path
def __lowerCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
return {}
def __lowerCAmelCase ( self : int , A__ : Union[str, Any] , A__ : List[str] ) -> Any:
'''simple docstring'''
a__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(A__ , A__ ):
for single_url in single_urls:
download_callback(A__ )
else:
a__ : Dict = single_urls
download_callback(A__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(A__ , A__ ):
a__ : Optional[int] = [os.path.join(A__ , urllib.parse.quote_plus(Path(A__ ).name ) ) for x in single_urls]
else:
a__ : Optional[Any] = single_urls
a__ : Tuple = os.path.join(A__ , urllib.parse.quote_plus(Path(A__ ).name ) )
a__ : List[str] = value
# make sure that values are unique
if all(isinstance(A__ , A__ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
a__ : Optional[int] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __lowerCAmelCase ( self : Dict , A__ : str , A__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
a__ : str = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
a__ : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , A__ ) ) for url in data_url )
a__ : Optional[Any] = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
a__ : Dict = [data_url[0]] * len(A__ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(A__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
a__ : Optional[int] = os.path.join(A__ , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(A__ )
return dummy_data_list
def __lowerCAmelCase ( self : Dict , A__ : Dict , A__ : str ) -> Optional[int]:
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(A__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
a__ : Union[str, Any] = os.path.join(A__ , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(A__ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __lowerCAmelCase ( self : int ) -> str:
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Any , A__ : Tuple ) -> Any:
'''simple docstring'''
def _iter_archive_members(A__ : str ):
# this preserves the order of the members inside the ZIP archive
a__ : Dict = Path(self.dummy_file ).parent
a__ : Tuple = path.relative_to(A__ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
a__ : Optional[Any] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(A__ )
a__ : str = Path(A__ )
a__ : Optional[Any] = _iter_archive_members(A__ ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(A__ ).as_posix(), file_path.open('''rb''' )
def __lowerCAmelCase ( self : Tuple , A__ : Tuple ) -> Tuple:
'''simple docstring'''
if not isinstance(A__ , A__ ):
a__ : int = [paths]
for path in paths:
if os.path.isfile(A__ ):
if os.path.basename(A__ ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(A__ ):
if os.path.basename(A__ ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(A__ ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(A__ , A__ )
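# Added illustration: the manager above maps each URL onto a file inside the
# dummy folder by quote_plus-encoding the URL's final path component, as in
# create_dummy_data_dict. A minimal standalone demonstration of that rule:
if __name__ == "__main__":
    url = "https://example.com/data/train.csv?rev=1"  # hypothetical URL
    print(os.path.join("dummy_data", urllib.parse.quote_plus(Path(url).name)))
    # -> dummy_data/train.csv%3Frev%3D1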
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
"""simple docstring"""
def __init__( self : Tuple , A__ : Any , A__ : str=3 , A__ : Union[str, Any]=3_2 , A__ : Dict=3 , A__ : List[str]=1_0 , A__ : Union[str, Any]=[1_0, 2_0, 3_0, 4_0] , A__ : Any=[1, 1, 2, 1] , A__ : List[str]=True , A__ : Tuple=True , A__ : Optional[Any]="relu" , A__ : str=3 , A__ : Tuple=None , ) -> Dict:
'''simple docstring'''
a__ : List[Any] = parent
a__ : Any = batch_size
a__ : int = image_size
a__ : str = num_channels
a__ : Tuple = embeddings_size
a__ : Optional[Any] = hidden_sizes
a__ : int = depths
a__ : int = is_training
a__ : List[str] = use_labels
a__ : Optional[Any] = hidden_act
a__ : Optional[int] = num_labels
a__ : Optional[Any] = scope
a__ : Optional[Any] = len(A__ )
def __lowerCAmelCase ( self : Optional[int] ) -> Any:
'''simple docstring'''
a__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ : int = None
if self.use_labels:
a__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
a__ : List[Any] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : Optional[int] , A__ : List[str] , A__ : List[Any] ) -> List[str]:
'''simple docstring'''
a__ : Dict = RegNetModel(config=A__ )
model.to(A__ )
model.eval()
a__ : List[Any] = model(A__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __lowerCAmelCase ( self : Any , A__ : Optional[Any] , A__ : Any , A__ : Union[str, Any] ) -> str:
'''simple docstring'''
a__ : Optional[int] = self.num_labels
a__ : Any = RegNetForImageClassification(A__ )
model.to(A__ )
model.eval()
a__ : Optional[Any] = model(A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : Union[str, Any] ) -> str:
'''simple docstring'''
a__ : Union[str, Any] = self.prepare_config_and_inputs()
a__ , a__ , a__ : List[str] = config_and_inputs
a__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
__UpperCamelCase = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
__UpperCamelCase = (
{"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def __lowerCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
a__ : List[Any] = RegNetModelTester(self )
a__ : Optional[int] = ConfigTester(self , config_class=A__ , has_text_modality=A__ )
def __lowerCAmelCase ( self : int ) -> Any:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self : int ) -> str:
'''simple docstring'''
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def __lowerCAmelCase ( self : Any ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
a__ , a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : List[Any] = model_class(A__ )
a__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : List[str] = [*signature.parameters.keys()]
a__ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A__ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def __lowerCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
a__ , a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Any = model_class(config=A__ )
for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
def check_hidden_states_output(A__ : List[str] , A__ : Optional[Any] , A__ : Any ):
a__ : List[Any] = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
a__ : List[str] = model(**self._prepare_for_class(A__ , A__ ) )
a__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a__ : Dict = self.model_tester.num_stages
self.assertEqual(len(A__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
a__ , a__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
a__ : Dict = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
a__ : Dict = layer_type
a__ : Any = True
check_hidden_states_output(A__ , A__ , A__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__ : Dict = True
check_hidden_states_output(A__ , A__ , A__ )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A__ )
@slow
def __lowerCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : List[Any] = RegNetModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
def __a ( ):
a__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(A__ )
a__ : str = self.default_image_processor
a__ : str = prepare_img()
a__ : int = image_processor(images=A__ , return_tensors='''pt''' ).to(A__ )
# forward pass
with torch.no_grad():
a__ : Optional[Any] = model(**A__ )
# verify the logits
a__ : Union[str, Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , A__ )
a__ : List[str] = torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(A__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1E-4 ) )
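# --- Illustrative sketch (not part of the test classes above) ---
# The slow integration test, restated as a standalone snippet: load a RegNet
# classifier, preprocess one image, and read off the predicted label. The
# checkpoint id "facebook/regnet-y-040" is an assumption here; any RegNet
# checkpoint from the hub works the same way.
def _regnet_inference_sketch():
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, RegNetForImageClassification

    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000) for the ImageNet-1k head
    return model.config.id2label[logits.argmax(-1).item()]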
| 688 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = LxmertTokenizer
__UpperCamelCase = LxmertTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def __lowerCAmelCase ( self : str ) -> str:
'''simple docstring'''
super().setUp()
a__ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self : int , A__ : int ) -> int:
'''simple docstring'''
a__ : List[Any] = '''UNwant\u00E9d,running'''
a__ : Optional[int] = '''unwanted, running'''
return input_text, output_text
def __lowerCAmelCase ( self : int ) -> Dict:
'''simple docstring'''
a__ : Optional[int] = self.tokenizer_class(self.vocab_file )
a__ : List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [7, 4, 5, 1_0, 8, 9] )
def __lowerCAmelCase ( self : Any ) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__ : Union[str, Any] = self.get_tokenizer()
a__ : Union[str, Any] = self.get_rust_tokenizer()
a__ : str = '''I was born in 92000, and this is falsé.'''
a__ : Tuple = tokenizer.tokenize(A__ )
a__ : Tuple = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
a__ : Optional[int] = tokenizer.encode(A__ , add_special_tokens=A__ )
a__ : Optional[Any] = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
a__ : List[str] = self.get_rust_tokenizer()
a__ : str = tokenizer.encode(A__ )
a__ : int = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
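# --- Illustrative sketch ---
# What the toy vocabulary above exercises: greedy WordPiece splitting. Writing
# the same vocab to a temporary file and tokenizing outside the test harness
# reproduces the pieces asserted in the test.
def _wordpiece_sketch():
    import os
    import tempfile
    from transformers import LxmertTokenizer

    vocab = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un",
             "runn", "##ing", ",", "low", "lowest"]
    with tempfile.TemporaryDirectory() as tmp:
        vocab_file = os.path.join(tmp, "vocab.txt")
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write("".join(token + "\n" for token in vocab))
        tok = LxmertTokenizer(vocab_file)
        # expected: ['un', '##want', '##ed', ',', 'runn', '##ing']
        return tok.tokenize("UNwant\u00E9d,running")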
| 688 | 1 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__SCREAMING_SNAKE_CASE = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 1_3_1_0_7_2,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
}
def __a ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict ):
return torch.atana(lowerCAmelCase__ , lowerCAmelCase__ ) / math.pi * 2
def __a ( lowerCAmelCase__ : Union[str, Any] ):
a__ : List[Any] = torch.sin(t * math.pi / 2 ) ** 2
a__ : str = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowerCAmelCase__ , lowerCAmelCase__ )
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
pass
class lowerCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , A__ : Union[str, Any] ) -> str:
'''simple docstring'''
super().__init__()
a__ : Tuple = DiffusionAttnUnetaD(A__ , n_attn_layers=4 )
a__ : List[str] = deepcopy(self.diffusion )
a__ : Union[str, Any] = torch.quasirandom.SobolEngine(1 , scramble=A__ )
def __a ( lowerCAmelCase__ : Optional[Any] ):
a__ : Dict = MODELS_MAP[model_name]['''url''']
os.system(F'wget {url} ./' )
return F'./{model_name}.ckpt'
__SCREAMING_SNAKE_CASE = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
__SCREAMING_SNAKE_CASE = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
__SCREAMING_SNAKE_CASE = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
__SCREAMING_SNAKE_CASE = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
__SCREAMING_SNAKE_CASE = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
__SCREAMING_SNAKE_CASE = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def __a ( lowerCAmelCase__ : List[Any] ):
if name.startswith('''skip''' ):
return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(F'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __a ( lowerCAmelCase__ : str ):
for key, value in ATTN_MAP.items():
if name.startswith(lowerCAmelCase__ ) and not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
return name.replace(lowerCAmelCase__ , lowerCAmelCase__ )
elif name.startswith(lowerCAmelCase__ ):
return [name.replace(lowerCAmelCase__ , lowerCAmelCase__ ) for v in value]
raise ValueError(F'Attn error with {name}' )
def __a ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str=13 ):
a__ : Tuple = input_string
if string.split('''.''' )[0] == "timestep_embed":
return string.replace('''timestep_embed''' , '''time_proj''' )
a__ : int = 0
if string.startswith('''net.3.''' ):
depth += 1
a__ : Optional[int] = string[6:]
elif string.startswith('''net.''' ):
a__ : Dict = string[4:]
while string.startswith('''main.7.''' ):
depth += 1
a__ : Optional[int] = string[7:]
if string.startswith('''main.''' ):
a__ : Any = string[5:]
# mid block
if string[:2].isdigit():
a__ : List[str] = string[:2]
a__ : int = string[2:]
else:
a__ : str = string[0]
a__ : str = string[1:]
if depth == max_depth:
a__ : Any = MID_NUM_TO_LAYER[layer_num]
a__ : Tuple = '''mid_block'''
elif depth > 0 and int(lowerCAmelCase__ ) < 7:
a__ : Optional[int] = DOWN_NUM_TO_LAYER[layer_num]
a__ : int = F'down_blocks.{depth}'
elif depth > 0 and int(lowerCAmelCase__ ) > 7:
a__ : Any = UP_NUM_TO_LAYER[layer_num]
a__ : Any = F'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
a__ : Dict = DEPTH_0_TO_LAYER[layer_num]
a__ : int = F'up_blocks.{max_depth - 1}' if int(lowerCAmelCase__ ) > 3 else '''down_blocks.0'''
if not string_left.startswith('''.''' ):
raise ValueError(F'Naming error with {input_string} and string_left: {string_left}.' )
a__ : str = string_left[1:]
if "resnets" in new_layer:
a__ : Optional[int] = convert_resconv_naming(lowerCAmelCase__ )
elif "attentions" in new_layer:
a__ : Union[str, Any] = convert_attn_naming(lowerCAmelCase__ )
a__ : int = new_string_left
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a__ : Any = prefix + '''.''' + new_layer + '''.''' + string_left
else:
a__ : Union[str, Any] = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
def __a ( lowerCAmelCase__ : Union[str, Any] ):
a__ : Optional[int] = {}
for k, v in state_dict.items():
if k.endswith('''kernel''' ):
# up- and downsample layers, don't have trainable weights
continue
a__ : Tuple = rename(lowerCAmelCase__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a__ : List[str] = transform_conv_attns(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
a__ : Union[str, Any] = v
return new_state_dict
def __a ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict ):
if len(lowerCAmelCase__ ) == 1:
if len(v.shape ) == 3:
# weight
a__ : int = v[:, :, 0]
else:
# bias
a__ : Optional[int] = v
else:
# qkv matrices
a__ : Optional[Any] = v.shape[0]
a__ : List[str] = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
a__ : List[Any] = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
a__ : str = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def __a ( lowerCAmelCase__ : Tuple ):
a__ : List[Any] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
a__ : List[str] = args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
a__ : int = download(lowerCAmelCase__ )
a__ : Optional[int] = MODELS_MAP[model_name]['''sample_rate''']
a__ : Tuple = MODELS_MAP[model_name]['''sample_size''']
a__ : Union[str, Any] = Object()
a__ : List[Any] = sample_size
a__ : Dict = sample_rate
a__ : Union[str, Any] = 0
a__ : List[Any] = UNetaDModel(sample_size=lowerCAmelCase__ , sample_rate=lowerCAmelCase__ )
a__ : Union[str, Any] = diffusers_model.state_dict()
a__ : Union[str, Any] = DiffusionUncond(lowerCAmelCase__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCAmelCase__ )['''state_dict'''] )
a__ : List[Any] = orig_model.diffusion_ema.eval()
a__ : Union[str, Any] = orig_model.state_dict()
a__ : int = rename_orig_weights(lowerCAmelCase__ )
a__ : Optional[Any] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
a__ : Any = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCAmelCase__ ) == 0, F'Problem with {renamed_minus_diffusers}'
assert all(k.endswith('''kernel''' ) for k in list(lowerCAmelCase__ ) ), F'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
a__ : Tuple = value.squeeze()
a__ : Any = value
diffusers_model.load_state_dict(lowerCAmelCase__ )
a__ : Union[str, Any] = 100
a__ : Union[str, Any] = 33
a__ : str = IPNDMScheduler(num_train_timesteps=lowerCAmelCase__ )
a__ : Tuple = torch.manual_seed(lowerCAmelCase__ )
a__ : int = torch.randn([1, 2, config.sample_size] , generator=lowerCAmelCase__ ).to(lowerCAmelCase__ )
a__ : List[Any] = torch.linspace(1 , 0 , steps + 1 , device=lowerCAmelCase__ )[:-1]
a__ : int = get_crash_schedule(lowerCAmelCase__ )
a__ : Optional[Any] = DanceDiffusionPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
a__ : Optional[int] = torch.manual_seed(33 )
a__ : Dict = pipe(num_inference_steps=lowerCAmelCase__ , generator=lowerCAmelCase__ ).audios
a__ : str = sampling.iplms_sample(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , {} )
a__ : List[Any] = generated.clamp(-1 , 1 )
a__ : Union[str, Any] = (generated - audio).abs().sum()
a__ : str = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''' , lowerCAmelCase__ )
print('''Diff max''' , lowerCAmelCase__ )
assert diff_max < 1E-3, F'Diff max: {diff_max} is too much :-/'
print(F'Conversion for {model_name} successful!' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
__SCREAMING_SNAKE_CASE = parser.parse_args()
main(args)
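# --- Illustrative sketch ---
# Generating audio with a converted checkpoint through the same pipeline class
# used for the parity check above. "harmonai/maestro-150k" is an assumed hub id
# for one of the converted models; alternatively pass the folder given to
# --checkpoint_path.
def _dance_diffusion_sketch():
    import torch
    from diffusers import DanceDiffusionPipeline

    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
    generator = torch.manual_seed(33)
    # audios has shape (batch, channels, sample_size)
    return pipe(num_inference_steps=100, generator=generator).audios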
| 688 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __a ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str ):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
a__ : Dict = TapasConfig.from_json_file(lowerCAmelCase__ )
# set absolute/relative position embeddings parameter
a__ : List[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
a__ : Optional[Any] = TapasForQuestionAnswering(config=lowerCAmelCase__ )
elif task == "WTQ":
# run_task_main.py hparams
a__ : List[str] = 4
a__ : Optional[int] = True
# hparam_utils.py hparams
a__ : List[Any] = 0.664694
a__ : List[Any] = 0.207951
a__ : Union[str, Any] = 0.121194
a__ : Optional[Any] = True
a__ : Optional[int] = True
a__ : List[str] = False
a__ : Union[str, Any] = 0.0352513
a__ : Any = TapasForQuestionAnswering(config=lowerCAmelCase__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
a__ : Tuple = 4
a__ : Dict = False
# hparam_utils.py hparams
a__ : str = 36.4519
a__ : str = 0.903421
a__ : Optional[Any] = 222.088
a__ : Dict = True
a__ : Dict = True
a__ : Dict = True
a__ : str = 0.763141
a__ : List[Any] = TapasForQuestionAnswering(config=lowerCAmelCase__ )
elif task == "TABFACT":
a__ : List[str] = TapasForSequenceClassification(config=lowerCAmelCase__ )
elif task == "MLM":
a__ : Tuple = TapasForMaskedLM(config=lowerCAmelCase__ )
elif task == "INTERMEDIATE_PRETRAINING":
a__ : List[str] = TapasModel(config=lowerCAmelCase__ )
else:
raise ValueError(F'Task {task} not supported.' )
print(F'Building PyTorch model from configuration: {config}' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model (weights and configuration)
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowerCAmelCase__ )
# Save tokenizer files
print(F'Save tokenizer files to {pytorch_dump_path}' )
a__ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''' , model_max_length=512 )
tokenizer.save_pretrained(lowerCAmelCase__ )
print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
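# --- Illustrative sketch ---
# Querying a table with a converted model. "google/tapas-base-finetuned-sqa" is
# an assumed hub id for an already-converted SQA checkpoint; TAPAS expects the
# table as a pandas DataFrame whose cells are strings.
def _tapas_qa_sketch():
    import pandas as pd
    from transformers import TapasForQuestionAnswering, TapasTokenizer

    tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-sqa")
    model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-sqa")
    table = pd.DataFrame({"Actors": ["Brad Pitt", "Leonardo DiCaprio"], "Age": ["56", "45"]})
    inputs = tokenizer(table=table, queries=["How old is Brad Pitt?"], return_tensors="pt")
    return model(**inputs)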
| 688 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free path cells, 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__SCREAMING_SNAKE_CASE = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__SCREAMING_SNAKE_CASE = tuple[int, int]
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : str , A__ : int , A__ : int , A__ : int , A__ : int , A__ : int , A__ : Node | None , ) -> None:
'''simple docstring'''
a__ : Optional[int] = pos_x
a__ : str = pos_y
a__ : Optional[int] = (pos_y, pos_x)
a__ : List[str] = goal_x
a__ : Any = goal_y
a__ : Any = g_cost
a__ : Optional[int] = parent
a__ : Union[str, Any] = self.calculate_heuristic()
a__ : List[Any] = self.g_cost + self.h_cost
def __lowerCAmelCase ( self : Union[str, Any] ) -> float:
'''simple docstring'''
a__ : List[str] = self.pos_x - self.goal_x
a__ : List[str] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(A__ ) + abs(A__ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : List[Any] , A__ : Node ) -> bool:
'''simple docstring'''
return self.f_cost < other.f_cost
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[int] , A__ : TPosition , A__ : TPosition ) -> Optional[Any]:
'''simple docstring'''
a__ : int = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , A__ )
a__ : Dict = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , A__ )
a__ : Dict = [self.start]
a__ : list[Node] = []
a__ : str = False
def __lowerCAmelCase ( self : List[str] ) -> list[TPosition]:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
a__ : Dict = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(A__ )
self.closed_nodes.append(A__ )
a__ : List[Any] = self.get_successors(A__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(A__ )
else:
# retrieve the best current path
a__ : Optional[int] = self.open_nodes.pop(self.open_nodes.index(A__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(A__ )
else:
self.open_nodes.append(A__ )
return [self.start.pos]
def __lowerCAmelCase ( self : Optional[Any] , A__ : Node ) -> list[Node]:
'''simple docstring'''
a__ : Optional[int] = []
for action in delta:
a__ : List[Any] = parent.pos_x + action[1]
a__ : Tuple = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
A__ , A__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , A__ , ) )
return successors
def __lowerCAmelCase ( self : List[Any] , A__ : Node | None ) -> list[TPosition]:
'''simple docstring'''
a__ : Union[str, Any] = node
a__ : Optional[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
a__ : Any = current_node.parent
path.reverse()
return path
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : List[Any] , A__ : TPosition , A__ : TPosition ) -> None:
'''simple docstring'''
a__ : str = AStar(A__ , A__ )
a__ : Optional[int] = AStar(A__ , A__ )
a__ : List[str] = False
def __lowerCAmelCase ( self : Tuple ) -> list[TPosition]:
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
a__ : int = self.fwd_astar.open_nodes.pop(0 )
a__ : List[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
A__ , A__ )
self.fwd_astar.closed_nodes.append(A__ )
self.bwd_astar.closed_nodes.append(A__ )
a__ : Tuple = current_bwd_node
a__ : Optional[int] = current_fwd_node
a__ : Optional[int] = {
self.fwd_astar: self.fwd_astar.get_successors(A__ ),
self.bwd_astar: self.bwd_astar.get_successors(A__ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(A__ )
else:
# retrieve the best current path
a__ : Optional[Any] = astar.open_nodes.pop(
astar.open_nodes.index(A__ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(A__ )
else:
astar.open_nodes.append(A__ )
return [self.fwd_astar.start.pos]
def __lowerCAmelCase ( self : List[str] , A__ : Node , A__ : Node ) -> list[TPosition]:
'''simple docstring'''
a__ : str = self.fwd_astar.retrace_path(A__ )
a__ : List[str] = self.bwd_astar.retrace_path(A__ )
bwd_path.pop()
bwd_path.reverse()
a__ : Optional[int] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__SCREAMING_SNAKE_CASE = (0, 0)
__SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__SCREAMING_SNAKE_CASE = time.time()
__SCREAMING_SNAKE_CASE = AStar(init, goal)
__SCREAMING_SNAKE_CASE = a_star.search()
__SCREAMING_SNAKE_CASE = time.time() - start_time
print(f'AStar execution time = {end_time:f} seconds')
    __SCREAMING_SNAKE_CASE = time.time()
    __SCREAMING_SNAKE_CASE = BidirectionalAStar(init, goal)
    # run the search so the timing below measures more than object construction
    __SCREAMING_SNAKE_CASE = bidir_astar.search()
    __SCREAMING_SNAKE_CASE = time.time() - bd_start_time
    print(f'BidirectionalAStar execution time = {bd_end_time:f} seconds')
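# --- Illustrative sketch ---
# The same unidirectional search with a binary heap instead of re-sorting the
# whole open list on every iteration; behaviour on the grid above is equivalent,
# but pops cost O(log n). Uses the Manhattan heuristic.
def _astar_heapq_sketch(grid, start, goal):
    import heapq

    moves = [(-1, 0), (0, -1), (1, 0), (0, 1)]  # up, left, down, right
    open_heap = [(0, start, [start])]  # (f_cost, position, path so far)
    g_cost = {start: 0}
    while open_heap:
        _, pos, path = heapq.heappop(open_heap)
        if pos == goal:
            return path
        for dy, dx in moves:
            ny, nx = pos[0] + dy, pos[1] + dx
            if not (0 <= ny < len(grid) and 0 <= nx < len(grid[0])) or grid[ny][nx] != 0:
                continue
            new_g = g_cost[pos] + 1
            if new_g < g_cost.get((ny, nx), float("inf")):
                g_cost[(ny, nx)] = new_g
                h = abs(ny - goal[0]) + abs(nx - goal[1])
                heapq.heappush(open_heap, (new_g + h, (ny, nx), path + [(ny, nx)]))
    return [start]  # goal unreachable: mirror the fallback of the class above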
| 688 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
__SCREAMING_SNAKE_CASE = {
'google/fnet-base': 5_1_2,
'google/fnet-large': 5_1_2,
}
__SCREAMING_SNAKE_CASE = '▁'
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "token_type_ids"]
__UpperCamelCase = FNetTokenizer
def __init__( self : Any , A__ : Any=None , A__ : int=None , A__ : List[str]=False , A__ : int=True , A__ : str=True , A__ : List[Any]="<unk>" , A__ : Dict="[SEP]" , A__ : List[str]="<pad>" , A__ : Union[str, Any]="[CLS]" , A__ : Dict="[MASK]" , **A__ : Tuple , ) -> List[str]:
'''simple docstring'''
a__ : Optional[int] = (
AddedToken(A__ , lstrip=A__ , rstrip=A__ , normalized=A__ )
if isinstance(A__ , A__ )
else mask_token
)
super().__init__(
A__ , tokenizer_file=A__ , do_lower_case=A__ , remove_space=A__ , keep_accents=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , **A__ , )
a__ : Optional[Any] = do_lower_case
a__ : Dict = remove_space
a__ : List[Any] = keep_accents
a__ : Optional[Any] = vocab_file
a__ : Any = False if not self.vocab_file else True
def __lowerCAmelCase ( self : str , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : Optional[int] = [self.sep_token_id]
a__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCAmelCase ( self : List[Any] , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : Dict = [self.sep_token_id]
a__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : Tuple , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ : Union[str, Any] = os.path.join(
A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ):
copyfile(self.vocab_file , A__ )
return (out_vocab_file,)
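# --- Illustrative sketch ---
# What the two pair-handling helpers above produce for a sentence pair:
# [CLS] A [SEP] B [SEP], with token_type_ids 0 over the first span and 1 over
# the second. "google/fnet-base" comes from the pretrained map above.
def _fnet_pair_sketch():
    from transformers import FNetTokenizerFast

    tok = FNetTokenizerFast.from_pretrained("google/fnet-base")
    enc = tok("first segment", "second segment")
    return enc["input_ids"], enc["token_type_ids"]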
| 688 | 1 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = 3
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
pass
def __a ( lowerCAmelCase__ : List[str] ):
for shard in shards:
for i in range(lowerCAmelCase__ ):
yield {"i": i, "shard": shard}
def __a ( ):
a__ : str = int(os.environ['''RANK'''] )
a__ : int = int(os.environ['''WORLD_SIZE'''] )
a__ : str = ArgumentParser()
parser.add_argument('''--streaming''' , type=lowerCAmelCase__ )
parser.add_argument('''--local_rank''' , type=lowerCAmelCase__ )
parser.add_argument('''--num_workers''' , type=lowerCAmelCase__ , default=0 )
a__ : int = parser.parse_args()
a__ : List[str] = args.streaming
a__ : Dict = args.num_workers
a__ : Dict = {'''shards''': [F'shard_{shard_idx}' for shard_idx in range(lowerCAmelCase__ )]}
a__ : Tuple = IterableDataset.from_generator(lowerCAmelCase__ , gen_kwargs=lowerCAmelCase__ )
if not streaming:
a__ : str = Dataset.from_list(list(lowerCAmelCase__ ) )
a__ : Optional[int] = split_dataset_by_node(lowerCAmelCase__ , rank=lowerCAmelCase__ , world_size=lowerCAmelCase__ )
a__ : Dict = torch.utils.data.DataLoader(lowerCAmelCase__ , num_workers=lowerCAmelCase__ )
a__ : str = NUM_SHARDS * NUM_ITEMS_PER_SHARD
a__ : Dict = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
a__ : str = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main()
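# --- Illustrative sketch ---
# What split_dataset_by_node does, shown outside torchrun: each rank receives a
# disjoint, roughly equal shard, which is exactly what the size check above
# verifies under a real distributed launch.
def _split_by_node_sketch():
    from datasets import Dataset
    from datasets.distributed import split_dataset_by_node

    ds = Dataset.from_dict({"i": list(range(12))})
    shards = [split_dataset_by_node(ds, rank=r, world_size=3)["i"] for r in range(3)]
    assert sum(len(s) for s in shards) == len(ds)  # nothing lost or duplicated
    return shards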
| 688 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
__SCREAMING_SNAKE_CASE = {
'distilbert-base-uncased': 5_1_2,
'distilbert-base-uncased-distilled-squad': 5_1_2,
'distilbert-base-cased': 5_1_2,
'distilbert-base-cased-distilled-squad': 5_1_2,
'distilbert-base-german-cased': 5_1_2,
'distilbert-base-multilingual-cased': 5_1_2,
}
__SCREAMING_SNAKE_CASE = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = ["input_ids", "attention_mask"]
__UpperCamelCase = DistilBertTokenizer
def __init__( self : str , A__ : Optional[Any]=None , A__ : Any=None , A__ : Tuple=True , A__ : List[Any]="[UNK]" , A__ : List[str]="[SEP]" , A__ : Tuple="[PAD]" , A__ : Optional[int]="[CLS]" , A__ : Union[str, Any]="[MASK]" , A__ : List[str]=True , A__ : Any=None , **A__ : int , ) -> str:
'''simple docstring'''
super().__init__(
A__ , tokenizer_file=A__ , do_lower_case=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , tokenize_chinese_chars=A__ , strip_accents=A__ , **A__ , )
a__ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , A__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , A__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , A__ ) != tokenize_chinese_chars
):
a__ : int = getattr(A__ , normalizer_state.pop('''type''' ) )
a__ : List[Any] = do_lower_case
a__ : str = strip_accents
a__ : List[str] = tokenize_chinese_chars
a__ : Dict = normalizer_class(**A__ )
a__ : List[Any] = do_lower_case
def __lowerCAmelCase ( self : Tuple , A__ : List[str] , A__ : Dict=None ) -> List[str]:
'''simple docstring'''
a__ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self : int , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : List[str] = [self.sep_token_id]
a__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : str , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
a__ : int = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
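# --- Illustrative sketch ---
# The special-token layout produced by the helpers above. Note that DistilBERT
# models consume only input_ids and attention_mask (see model_input_names), so
# unlike BERT no token_type_ids are returned by default.
def _distilbert_pair_sketch():
    from transformers import DistilBertTokenizerFast

    tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
    enc = tok("hello world", "second sentence")
    # -> ['[CLS]', 'hello', 'world', '[SEP]', 'second', 'sentence', '[SEP]']
    return tok.convert_ids_to_tokens(enc["input_ids"])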
| 688 | 1 |
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__SCREAMING_SNAKE_CASE = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__SCREAMING_SNAKE_CASE = subprocess.check_output(f'git diff --name-only {fork_point_sha}'.split()).decode('utf-8').split()
__SCREAMING_SNAKE_CASE = '|'.join(sys.argv[1:])
__SCREAMING_SNAKE_CASE = re.compile(Rf'^({joined_dirs}).*?\.py$')
__SCREAMING_SNAKE_CASE = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
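# --- Illustrative sketch ---
# How the directory filter above behaves for argv = ["utils", "src"]: only .py
# files under the listed top-level directories survive.
def _filter_regex_sketch():
    import re

    rx = re.compile(r"^(utils|src).*?\.py$")
    assert rx.match("src/models/bert.py")
    assert not rx.match("docs/readme.md")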
| 688 |
| 688 | 1 |
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = R'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
@add_start_docstrings(A__ )
def __call__( self : Optional[int] , A__ : torch.LongTensor , A__ : torch.FloatTensor , **A__ : Optional[int] ) -> bool:
'''simple docstring'''
raise NotImplementedError('''StoppingCriteria needs to be subclassed''' )
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Optional[int] , A__ : int , A__ : Optional[int] = None ) -> str:
'''simple docstring'''
a__ : str = max_length
a__ : List[Any] = max_position_embeddings
@add_start_docstrings(A__ )
def __call__( self : Dict , A__ : torch.LongTensor , A__ : torch.FloatTensor , **A__ : List[str] ) -> bool:
'''simple docstring'''
a__ : Tuple = input_ids.shape[-1]
a__ : int = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
F'maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '
'''exceptions, performance degradation, or nothing at all.''' )
return is_done
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , A__ : int , A__ : int ) -> str:
'''simple docstring'''
warnings.warn(
'''The class `MaxNewTokensCriteria` is deprecated. '''
F'Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '
'''with `max_length = start_length + max_new_tokens` instead.''' , A__ , )
a__ : int = start_length
a__ : Optional[Any] = max_new_tokens
a__ : int = start_length + max_new_tokens
@add_start_docstrings(A__ )
def __call__( self : Dict , A__ : torch.LongTensor , A__ : torch.FloatTensor , **A__ : Dict ) -> bool:
'''simple docstring'''
return input_ids.shape[-1] >= self.max_length
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , A__ : float , A__ : Optional[float] = None ) -> List[str]:
'''simple docstring'''
a__ : List[str] = max_time
a__ : Union[str, Any] = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(A__ )
def __call__( self : List[str] , A__ : torch.LongTensor , A__ : torch.FloatTensor , **A__ : Optional[int] ) -> bool:
'''simple docstring'''
return time.time() - self.initial_timestamp > self.max_time
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
@add_start_docstrings(A__ )
def __call__( self : Dict , A__ : torch.LongTensor , A__ : torch.FloatTensor , **A__ : Optional[Any] ) -> bool:
'''simple docstring'''
return any(criteria(A__ , A__ ) for criteria in self )
@property
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for stopping_criterium in self:
if isinstance(A__ , A__ ):
return stopping_criterium.max_length
elif isinstance(A__ , A__ ):
return stopping_criterium.max_length
return None
def __a ( lowerCAmelCase__ : StoppingCriteriaList , lowerCAmelCase__ : int ):
a__ : List[Any] = stopping_criteria.max_length
a__ : Union[str, Any] = deepcopy(lowerCAmelCase__ )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , lowerCAmelCase__ )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=lowerCAmelCase__ ) )
return new_stopping_criteria
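# --- Illustrative sketch ---
# Plugging criteria like the ones above into generation. The classes here are
# imported under their real transformers names; "gpt2" is an assumed model id.
# generate() stops as soon as any criterion in the list returns True.
def _stopping_criteria_sketch():
    from transformers import (
        AutoModelForCausalLM,
        AutoTokenizer,
        MaxLengthCriteria,
        StoppingCriteriaList,
    )

    tok = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tok("The quick brown fox", return_tensors="pt")
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
    out = model.generate(**inputs, stopping_criteria=criteria, do_sample=False)
    return tok.decode(out[0])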
| 688 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __a ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] ):
# Construct model
if gpta_config_file == "":
a__ : Union[str, Any] = GPTaConfig()
else:
a__ : Dict = GPTaConfig.from_json_file(lowerCAmelCase__ )
a__ : Optional[int] = GPTaModel(lowerCAmelCase__ )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
a__ : int = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
a__ : Union[str, Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , lowerCAmelCase__ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
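# --- Illustrative sketch ---
# Example invocation (the script filename is a placeholder; the flags are the
# ones defined above), followed by loading the dump with the library's real
# class name, GPT2Model:
#
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_ckpt \
#       --pytorch_dump_folder_path /path/to/out
def _load_converted_gpt2_sketch():
    from transformers import GPT2Model

    return GPT2Model.from_pretrained("/path/to/out")  # placeholder path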
| 688 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
__SCREAMING_SNAKE_CASE = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def __a ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str=8 ):
a__ : List[str] = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
a__ : Tuple = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , A__ : MultilingualCLIP , A__ : XLMRobertaTokenizer , A__ : UNetaDConditionModel , A__ : Union[DDIMScheduler, DDPMScheduler] , A__ : VQModel , ) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=A__ , tokenizer=A__ , unet=A__ , scheduler=A__ , movq=A__ , )
a__ : str = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCAmelCase ( self : int , A__ : Dict , A__ : Dict , A__ : List[Any] , A__ : int , A__ : Union[str, Any] , A__ : Tuple ) -> Tuple:
'''simple docstring'''
if latents is None:
a__ : List[Any] = randn_tensor(A__ , generator=A__ , device=A__ , dtype=A__ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
a__ : int = latents.to(A__ )
a__ : Any = latents * scheduler.init_noise_sigma
return latents
def __lowerCAmelCase ( self : Optional[int] , A__ : List[Any] , A__ : List[str] , A__ : str , A__ : Optional[Any] , A__ : int=None , ) -> List[str]:
'''simple docstring'''
a__ : Dict = len(A__ ) if isinstance(A__ , A__ ) else 1
# get prompt text embeddings
a__ : str = self.tokenizer(
A__ , padding='''max_length''' , truncation=A__ , max_length=7_7 , return_attention_mask=A__ , add_special_tokens=A__ , return_tensors='''pt''' , )
a__ : str = text_inputs.input_ids
a__ : int = self.tokenizer(A__ , padding='''longest''' , return_tensors='''pt''' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(A__ , A__ ):
a__ : List[str] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
a__ : Optional[Any] = text_input_ids.to(A__ )
a__ : Dict = text_inputs.attention_mask.to(A__ )
a__ , a__ : Tuple = self.text_encoder(
input_ids=A__ , attention_mask=A__ )
a__ : Optional[int] = prompt_embeds.repeat_interleave(A__ , dim=0 )
a__ : str = text_encoder_hidden_states.repeat_interleave(A__ , dim=0 )
a__ : List[str] = text_mask.repeat_interleave(A__ , dim=0 )
if do_classifier_free_guidance:
a__ : List[str]
if negative_prompt is None:
a__ : Optional[int] = [''''''] * batch_size
elif type(A__ ) is not type(A__ ):
raise TypeError(
F'`negative_prompt` should be the same type to `prompt`, but got {type(A__ )} !='
F' {type(A__ )}.' )
elif isinstance(A__ , A__ ):
a__ : str = [negative_prompt]
elif batch_size != len(A__ ):
raise ValueError(
F'`negative_prompt`: {negative_prompt} has batch size {len(A__ )}, but `prompt`:'
F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
''' the batch size of `prompt`.''' )
else:
a__ : Optional[int] = negative_prompt
a__ : int = self.tokenizer(
A__ , padding='''max_length''' , max_length=7_7 , truncation=A__ , return_attention_mask=A__ , add_special_tokens=A__ , return_tensors='''pt''' , )
a__ : Dict = uncond_input.input_ids.to(A__ )
a__ : List[Any] = uncond_input.attention_mask.to(A__ )
a__ , a__ : Optional[int] = self.text_encoder(
input_ids=A__ , attention_mask=A__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
a__ : Optional[Any] = negative_prompt_embeds.shape[1]
a__ : Union[str, Any] = negative_prompt_embeds.repeat(1 , A__ )
a__ : Dict = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A__ )
a__ : Optional[int] = uncond_text_encoder_hidden_states.shape[1]
a__ : List[str] = uncond_text_encoder_hidden_states.repeat(1 , A__ , 1 )
a__ : int = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , A__ , -1 )
a__ : List[Any] = uncond_text_mask.repeat_interleave(A__ , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a__ : str = torch.cat([negative_prompt_embeds, prompt_embeds] )
a__ : str = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
a__ : Union[str, Any] = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __lowerCAmelCase ( self : Tuple , A__ : Tuple=0 ) -> Optional[int]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
a__ : Optional[Any] = torch.device(F'cuda:{gpu_id}' )
a__ : Optional[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A__ , A__ )
def __lowerCAmelCase ( self : Dict , A__ : Optional[Any]=0 ) -> Any:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
a__ : Tuple = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=A__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
a__ : str = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
a__ , a__ : Optional[int] = cpu_offload_with_hook(A__ , A__ , prev_module_hook=A__ )
if self.safety_checker is not None:
a__ , a__ : List[str] = cpu_offload_with_hook(self.safety_checker , A__ , prev_module_hook=A__ )
# We'll offload the last model manually.
a__ : str = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCAmelCase ( self : int ) -> Any:
'''simple docstring'''
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(A__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A__ )
def __call__( self : Dict , A__ : Union[str, List[str]] , A__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A__ : Optional[Union[str, List[str]]] = None , A__ : int = 5_1_2 , A__ : int = 5_1_2 , A__ : int = 1_0_0 , A__ : float = 4.0 , A__ : int = 1 , A__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A__ : Optional[torch.FloatTensor] = None , A__ : Optional[str] = "pil" , A__ : bool = True , ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(A__ , A__ ):
a__ : Union[str, Any] = 1
elif isinstance(A__ , A__ ):
a__ : List[str] = len(A__ )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(A__ )}' )
a__ : List[str] = self._execution_device
a__ : int = batch_size * num_images_per_prompt
a__ : List[Any] = guidance_scale > 1.0
a__ , a__ , a__ : str = self._encode_prompt(
A__ , A__ , A__ , A__ , A__ )
if isinstance(A__ , A__ ):
a__ : List[Any] = torch.cat(A__ , dim=0 )
if isinstance(A__ , A__ ):
a__ : List[str] = torch.cat(A__ , dim=0 )
if do_classifier_free_guidance:
a__ : Any = image_embeds.repeat_interleave(A__ , dim=0 )
a__ : Dict = negative_image_embeds.repeat_interleave(A__ , dim=0 )
a__ : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=A__ )
self.scheduler.set_timesteps(A__ , device=A__ )
a__ : Optional[Any] = self.scheduler.timesteps
a__ : List[Any] = self.unet.config.in_channels
a__ , a__ : Union[str, Any] = get_new_h_w(A__ , A__ , self.movq_scale_factor )
# create initial latent
a__ : Any = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , A__ , A__ , A__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A__ ) ):
# expand the latents if we are doing classifier free guidance
a__ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a__ : Any = {'''text_embeds''': prompt_embeds, '''image_embeds''': image_embeds}
a__ : Dict = self.unet(
sample=A__ , timestep=A__ , encoder_hidden_states=A__ , added_cond_kwargs=A__ , return_dict=A__ , )[0]
if do_classifier_free_guidance:
a__ , a__ : Any = noise_pred.split(latents.shape[1] , dim=1 )
a__ , a__ : List[str] = noise_pred.chunk(2 )
a__ , a__ : Tuple = variance_pred.chunk(2 )
a__ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
a__ : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
a__ , a__ : str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
a__ : int = self.scheduler.step(
A__ , A__ , A__ , generator=A__ , ).prev_sample
# post-processing
a__ : Dict = self.movq.decode(A__ , force_not_quantize=A__ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
a__ : Optional[Any] = image * 0.5 + 0.5
a__ : str = image.clamp(0 , 1 )
a__ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
a__ : List[str] = self.numpy_to_pil(A__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A__ )
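# --- Illustrative usage sketch (added; not part of the original file) ---
# A minimal driver for a __call__ like the one above. The pipeline classes and
# checkpoint names below are assumptions mirroring the public Kandinsky 2.1
# API; the identifiers in this file itself are machine-mangled.
if __name__ == "__main__":
    from diffusers import KandinskyPriorPipeline, KandinskyPipeline

    prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
    prior_out = prior("a watercolor painting of a fox")  # text -> CLIP image embeddings
    pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
    image = pipe(
        "a watercolor painting of a fox",
        image_embeds=prior_out.image_embeds,
        negative_image_embeds=prior_out.negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
    ).images[0]
    image.save("fox.png")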
| 688 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__SCREAMING_SNAKE_CASE = parser.parse_args()
__SCREAMING_SNAKE_CASE = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
__SCREAMING_SNAKE_CASE = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
__SCREAMING_SNAKE_CASE = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
__SCREAMING_SNAKE_CASE = reader.read()
__SCREAMING_SNAKE_CASE = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
__SCREAMING_SNAKE_CASE = UNetaDModel(**config)
else:
__SCREAMING_SNAKE_CASE = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
__SCREAMING_SNAKE_CASE = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__SCREAMING_SNAKE_CASE = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__SCREAMING_SNAKE_CASE = config[key]
del config[key]
__SCREAMING_SNAKE_CASE = [k.replace('UNetRes', '') for k in config['down_block_types']]
__SCREAMING_SNAKE_CASE = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
__SCREAMING_SNAKE_CASE = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
__SCREAMING_SNAKE_CASE = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
__SCREAMING_SNAKE_CASE = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
__SCREAMING_SNAKE_CASE = param_value
__SCREAMING_SNAKE_CASE = True
if not has_changed:
__SCREAMING_SNAKE_CASE = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 688 | 1 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin :
"""simple docstring"""
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return self.get_dummy_input()
@property
def __lowerCAmelCase ( self : Tuple ) -> Any:
'''simple docstring'''
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(F'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' )
def __lowerCAmelCase ( self : Dict , A__ : Tuple=True , A__ : List[Any]=False , A__ : List[str]=False , A__ : List[Any]=False , ) -> str:
'''simple docstring'''
a__ : List[Any] = 4
a__ : Dict = 3_2
a__ : int = (3_2, 3_2)
a__ : Union[str, Any] = torch.manual_seed(0 )
a__ : Union[str, Any] = torch.device(A__ )
a__ : Optional[int] = (batch_size, num_channels) + sizes
a__ : Optional[Any] = randn_tensor(A__ , generator=A__ , device=A__ )
a__ : int = {'''hidden_states''': hidden_states}
if include_temb:
a__ : List[Any] = 1_2_8
a__ : List[str] = randn_tensor((batch_size, temb_channels) , generator=A__ , device=A__ )
if include_res_hidden_states_tuple:
a__ : str = torch.manual_seed(1 )
a__ : int = (randn_tensor(A__ , generator=A__ , device=A__ ),)
if include_encoder_hidden_states:
a__ : List[Any] = floats_tensor((batch_size, 3_2, 3_2) ).to(A__ )
if include_skip_sample:
a__ : Optional[Any] = randn_tensor(((batch_size, 3) + sizes) , generator=A__ , device=A__ )
return dummy_input
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
a__ : List[str] = {
'''in_channels''': 3_2,
'''out_channels''': 3_2,
'''temb_channels''': 1_2_8,
}
if self.block_type == "up":
a__ : int = 3_2
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
a__ : Tuple = self.dummy_input
return init_dict, inputs_dict
def __lowerCAmelCase ( self : Dict , A__ : Any ) -> str:
'''simple docstring'''
a__ , a__ : List[Any] = self.prepare_init_args_and_inputs_for_common()
a__ : Union[str, Any] = self.block_class(**A__ )
unet_block.to(A__ )
unet_block.eval()
with torch.no_grad():
a__ : Tuple = unet_block(**A__ )
if isinstance(A__ , A__ ):
a__ : Dict = output[0]
self.assertEqual(output.shape , self.output_shape )
a__ : Optional[int] = output[0, -1, -3:, -3:]
a__ : List[str] = torch.tensor(A__ ).to(A__ )
assert torch_all_close(output_slice.flatten() , A__ , atol=5E-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
a__ , a__ : Dict = self.prepare_init_args_and_inputs_for_common()
a__ : Optional[int] = self.block_class(**A__ )
model.to(A__ )
model.train()
a__ : Dict = model(**A__ )
if isinstance(A__ , A__ ):
a__ : Tuple = output[0]
a__ : Dict = torch.device(A__ )
a__ : Union[str, Any] = randn_tensor(output.shape , device=A__ )
a__ : str = torch.nn.functional.mse_loss(A__ , A__ )
loss.backward()
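# --- Illustrative usage sketch (added; not part of the original file) ---
# Concrete block tests mix the tester above into unittest.TestCase and pin
# `block_class`/`block_type`, which the mixin reads via self.block_class and
# self.block_type; the DownBlock2D import path below is an assumption based
# on diffusers' public layout, and the sketch is not meant to run as-is.
#
#   from diffusers.models.unet_2d_blocks import DownBlock2D
#
#   class DownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
#       block_class = DownBlock2D
#       block_type = "down"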
| 688 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest( SchedulerCommonTest ):
"""simple docstring"""
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10
def __lowerCAmelCase ( self : Optional[Any] , **A__ : Optional[int] ) -> int:
'''simple docstring'''
a__ : Optional[int] = {
'''num_train_timesteps''': 1_1_0_0,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**A__ )
return config
def __lowerCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A__ )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=A__ , beta_end=A__ )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A__ )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A__ )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
'''simple docstring'''
a__ : Any = self.scheduler_classes[0]
a__ : str = self.get_scheduler_config(prediction_type='''v_prediction''' )
a__ : Dict = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps )
a__ : Tuple = self.dummy_model()
a__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ : Dict = sample.to(A__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : Optional[Any] = scheduler.scale_model_input(A__ , A__ )
a__ : Union[str, Any] = model(A__ , A__ )
a__ : List[str] = scheduler.step(A__ , A__ , A__ )
a__ : Optional[Any] = output.prev_sample
a__ : Tuple = torch.sum(torch.abs(A__ ) )
a__ : Optional[int] = torch.mean(torch.abs(A__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_4286_5017_0972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_002 ) < 1E-3
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
if torch_device == "mps":
return
a__ : List[Any] = self.scheduler_classes[0]
a__ : Tuple = self.get_scheduler_config()
a__ : Tuple = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps )
a__ : List[Any] = self.dummy_model()
a__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ : Any = sample.to(A__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : str = scheduler.scale_model_input(A__ , A__ )
a__ : List[str] = model(A__ , A__ )
a__ : str = scheduler.step(A__ , A__ , A__ )
a__ : List[Any] = output.prev_sample
a__ : Dict = torch.sum(torch.abs(A__ ) )
a__ : Optional[Any] = torch.mean(torch.abs(A__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
def __lowerCAmelCase ( self : str ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
a__ : Optional[int] = self.scheduler_classes[0]
a__ : Tuple = self.get_scheduler_config()
a__ : List[Any] = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps , device=A__ )
a__ : Union[str, Any] = self.dummy_model()
a__ : List[Any] = self.dummy_sample_deter.to(A__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
a__ : Optional[int] = scheduler.scale_model_input(A__ , A__ )
a__ : List[Any] = model(A__ , A__ )
a__ : Any = scheduler.step(A__ , A__ , A__ )
a__ : List[str] = output.prev_sample
a__ : Any = torch.sum(torch.abs(A__ ) )
a__ : Union[str, Any] = torch.mean(torch.abs(A__ ) )
if str(A__ ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
| 688 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
__SCREAMING_SNAKE_CASE = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class ShapEPipelineOutput( BaseOutput ):
"""simple docstring"""
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]
class ShapEImgaImgPipeline( DiffusionPipeline ):
"""simple docstring"""
def __init__( self : List[str] , A__ : PriorTransformer , A__ : CLIPVisionModel , A__ : CLIPImageProcessor , A__ : HeunDiscreteScheduler , A__ : ShapERenderer , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
prior=A__ , image_encoder=A__ , image_processor=A__ , scheduler=A__ , renderer=A__ , )
def __lowerCAmelCase ( self : str , A__ : Tuple , A__ : Tuple , A__ : Any , A__ : Dict , A__ : List[str] , A__ : List[str] ) -> Tuple:
'''simple docstring'''
if latents is None:
a__ : List[str] = randn_tensor(A__ , generator=A__ , device=A__ , dtype=A__ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
a__ : Union[str, Any] = latents.to(A__ )
a__ : Tuple = latents * scheduler.init_noise_sigma
return latents
def __lowerCAmelCase ( self : Any , A__ : Dict=0 ) -> Any:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
a__ : Optional[int] = torch.device(F'cuda:{gpu_id}' )
a__ : int = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A__ , A__ )
@property
def __lowerCAmelCase ( self : int ) -> Optional[int]:
'''simple docstring'''
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(A__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __lowerCAmelCase ( self : List[Any] , A__ : List[Any] , A__ : Union[str, Any] , A__ : Optional[Any] , A__ : str , ) -> Optional[int]:
'''simple docstring'''
if isinstance(A__ , A__ ) and isinstance(image[0] , torch.Tensor ):
a__ : List[Any] = torch.cat(A__ , axis=0 ) if image[0].ndim == 4 else torch.stack(A__ , axis=0 )
if not isinstance(A__ , torch.Tensor ):
a__ : Optional[int] = self.image_processor(A__ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
a__ : Optional[int] = image.to(dtype=self.image_encoder.dtype , device=A__ )
a__ : str = self.image_encoder(A__ )['''last_hidden_state''']
a__ : int = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
a__ : List[str] = image_embeds.repeat_interleave(A__ , dim=0 )
if do_classifier_free_guidance:
a__ : Union[str, Any] = torch.zeros_like(A__ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a__ : Any = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(A__ )
def __call__( self : Optional[int] , A__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , A__ : int = 1 , A__ : int = 2_5 , A__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A__ : Optional[torch.FloatTensor] = None , A__ : float = 4.0 , A__ : int = 6_4 , A__ : Optional[str] = "pil" , A__ : bool = True , ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(A__ , PIL.Image.Image ):
a__ : int = 1
elif isinstance(A__ , torch.Tensor ):
a__ : Any = image.shape[0]
elif isinstance(A__ , A__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
a__ : Dict = len(A__ )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(A__ )}' )
a__ : Dict = self._execution_device
a__ : List[str] = batch_size * num_images_per_prompt
a__ : Optional[int] = guidance_scale > 1.0
a__ : int = self._encode_image(A__ , A__ , A__ , A__ )
# prior
self.scheduler.set_timesteps(A__ , device=A__ )
a__ : Optional[int] = self.scheduler.timesteps
a__ : Tuple = self.prior.config.num_embeddings
a__ : str = self.prior.config.embedding_dim
a__ : Tuple = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , A__ , A__ , A__ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
a__ : List[Any] = latents.reshape(latents.shape[0] , A__ , A__ )
for i, t in enumerate(self.progress_bar(A__ ) ):
# expand the latents if we are doing classifier free guidance
a__ : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a__ : Optional[int] = self.scheduler.scale_model_input(A__ , A__ )
a__ : int = self.prior(
A__ , timestep=A__ , proj_embedding=A__ , ).predicted_image_embedding
# remove the variance
a__ , a__ : List[str] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
a__ , a__ : Optional[int] = noise_pred.chunk(2 )
a__ : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
a__ : Optional[Any] = self.scheduler.step(
A__ , timestep=A__ , sample=A__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=A__ )
a__ : Any = []
for i, latent in enumerate(A__ ):
a__ : int = self.renderer.decode(
latent[None, :] , A__ , size=A__ , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(A__ )
a__ : Optional[int] = torch.stack(A__ )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported not output_type={output_type}' )
a__ : Optional[Any] = images.cpu().numpy()
if output_type == "pil":
a__ : Tuple = [self.numpy_to_pil(A__ ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=A__ )
| 688 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
a__ : str = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
a__ , a__ : List[Any] = get_aligned_output_features_output_indices(A__ , A__ , A__ )
self.assertEqual(A__ , ['''c'''] )
self.assertEqual(A__ , [2] )
# Out indices set to match out features
a__ , a__ : Optional[int] = get_aligned_output_features_output_indices(['''a''', '''c'''] , A__ , A__ )
self.assertEqual(A__ , ['''a''', '''c'''] )
self.assertEqual(A__ , [0, 2] )
# Out features set to match out indices
a__ , a__ : int = get_aligned_output_features_output_indices(A__ , [0, 2] , A__ )
self.assertEqual(A__ , ['''a''', '''c'''] )
self.assertEqual(A__ , [0, 2] )
# Out features selected from negative indices
a__ , a__ : List[str] = get_aligned_output_features_output_indices(A__ , [-3, -1] , A__ )
self.assertEqual(A__ , ['''a''', '''c'''] )
self.assertEqual(A__ , [-3, -1] )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , A__ )
# Out features must be a list
with self.assertRaises(A__ ):
verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] )
# Out features must be a subset of stage names
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] )
# Out indices must be a list or tuple
with self.assertRaises(A__ ):
verify_out_features_out_indices(A__ , 0 , ['''a''', '''b'''] )
# Out indices must be a subset of stage names
with self.assertRaises(A__ ):
verify_out_features_out_indices(A__ , (0, 1) , ['''a'''] )
# Out features and out indices must be the same length
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] )
# Out features should match out indices
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] )
# Out features and out indices should be in order
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] )
# Check passes with valid inputs
verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] )
def __lowerCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
a__ : Optional[Any] = BackboneMixin()
a__ : int = ['''a''', '''b''', '''c''']
a__ : List[Any] = ['''a''', '''c''']
a__ : Tuple = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
a__ : Dict = ['''a''', '''b''']
self.assertEqual(backbone.out_features , ['''a''', '''b'''] )
self.assertEqual(backbone.out_indices , [0, 1] )
a__ : int = [-3, -1]
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 688 | 1 |
'''simple docstring'''
def base16_encode ( data : bytes ) -> str:
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode ( data : str ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:\nData does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
    if not set(data ) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
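# --- Illustrative round trip (added; not part of the original file) ---
# Encoding then decoding is the identity on bytes; b"Hello World!" maps to the
# uppercase hex string "48656C6C6F20576F726C6421" and back.
if __name__ == "__main__":
    encoded = base16_encode(b"Hello World!")
    assert encoded == "48656C6C6F20576F726C6421"
    assert base16_decode(encoded) == b"Hello World!"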
if __name__ == "__main__":
import doctest
doctest.testmod()
| 688 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters ( state_dict ):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict ( state_dict , codebook_state_dict ):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' )
        key = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' )
        key = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' )
        key = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' )
        key = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' )
        key = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' )
        key = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' )
        key = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' )
        key = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' )
        key = key.replace('''image_encoder.module''' , '''flava.image_model''' )
        key = key.replace('''text_encoder.module''' , '''flava.text_model''' )
        key = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' )
        key = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' )
        key = key.replace('''text_projection''' , '''flava.text_projection''' )
        key = key.replace('''image_projection''' , '''flava.image_projection''' )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        # assumption: DALL-E codebook weights live under the image_codebook submodule
        upgrade[F'image_codebook.{key}'] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint ( checkpoint_path , codebook_path , pytorch_dump_folder_path , config_path=None ):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    # assumption: the codebook is converted in memory (no intermediate checkpoint on disk)
    codebook_state_dict = convert_dalle_checkpoint(codebook_path , None , save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path , map_location='''cpu''' )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path , map_location='''cpu''' )
    hf_state_dict = upgrade_state_dict(state_dict , codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1E-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 688 | 1 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[Any] ) -> Any:
'''simple docstring'''
a__ : Tuple = [1_0, 2_0, 3_0, 4_0, 5_0, 6_0]
a__ : Tuple = [2, 4, 6, 8, 1_0, 1_2]
a__ : Tuple = 1_0_0
self.assertEqual(kp.calc_profit(A__ , A__ , A__ ) , 2_1_0 )
def __lowerCAmelCase ( self : str ) -> str:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , '''Weight can not be negative.''' )
def __lowerCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , '''Profit can not be negative.''' )
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
        self.assertRaisesRegex(
            ValueError , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 688 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError( RuntimeError ):
"""simple docstring"""
pass
def gen ( shards : List[str] ):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
yield {"i": i, "shard": shard}
def main ( ):
    rank = int(os.environ['''RANK'''] )
    world_size = int(os.environ['''WORLD_SIZE'''] )
    parser = ArgumentParser()
    parser.add_argument('''--streaming''' , type=bool )
    parser.add_argument('''--local_rank''' , type=int )
    parser.add_argument('''--num_workers''' , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {'''shards''': [F'shard_{shard_idx}' for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(F'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main()
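# --- Illustrative launch note (added; not part of the original file) ---
# The script reads RANK and WORLD_SIZE from the environment, so it is meant to
# be started through a distributed launcher that sets them per process, e.g.:
#   torchrun --nproc_per_node=2 distributed_split_test.py --streaming True --num_workers 2
# (the script name and flag values here are assumptions for illustration).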
| 688 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipaProcessor( ProcessorMixin ):
"""simple docstring"""
__UpperCamelCase = ["image_processor", "tokenizer"]
__UpperCamelCase = "BlipImageProcessor"
__UpperCamelCase = "AutoTokenizer"
    def __init__( self , image_processor , tokenizer ):
        '''simple docstring'''
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
def __call__( self : Optional[int] , A__ : ImageInput = None , A__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , A__ : bool = True , A__ : Union[bool, str, PaddingStrategy] = False , A__ : Union[bool, str, TruncationStrategy] = None , A__ : Optional[int] = None , A__ : int = 0 , A__ : Optional[int] = None , A__ : Optional[bool] = None , A__ : bool = False , A__ : bool = False , A__ : bool = False , A__ : bool = False , A__ : bool = False , A__ : bool = True , A__ : Optional[Union[str, TensorType]] = None , **A__ : List[Any] , ) -> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
a__ : List[Any] = self.tokenizer
a__ : List[Any] = self.tokenizer(
text=A__ , add_special_tokens=A__ , padding=A__ , truncation=A__ , max_length=A__ , stride=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , return_overflowing_tokens=A__ , return_special_tokens_mask=A__ , return_offsets_mapping=A__ , return_token_type_ids=A__ , return_length=A__ , verbose=A__ , return_tensors=A__ , **A__ , )
return text_encoding
# add pixel_values
a__ : Union[str, Any] = self.image_processor(A__ , return_tensors=A__ )
if text is not None:
a__ : Optional[Any] = self.tokenizer(
text=A__ , add_special_tokens=A__ , padding=A__ , truncation=A__ , max_length=A__ , stride=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , return_overflowing_tokens=A__ , return_special_tokens_mask=A__ , return_offsets_mapping=A__ , return_token_type_ids=A__ , return_length=A__ , verbose=A__ , return_tensors=A__ , **A__ , )
else:
a__ : Optional[int] = None
if text_encoding is not None:
encoding_image_processor.update(A__ )
return encoding_image_processor
def __lowerCAmelCase ( self : int , *A__ : Optional[Any] , **A__ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*A__ , **A__ )
def __lowerCAmelCase ( self : List[str] , *A__ : List[Any] , **A__ : Union[str, Any] ) -> Any:
'''simple docstring'''
return self.tokenizer.decode(*A__ , **A__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
'''simple docstring'''
a__ : Dict = self.tokenizer.model_input_names
a__ : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 688 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__SCREAMING_SNAKE_CASE = open # noqa: we just need to have a builtin inside this module to test it properly
| 688 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'sentencepiece.model'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
__SCREAMING_SNAKE_CASE = {
'google/rembert': 2_5_6,
}
class RemBertTokenizer( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , A__ : Any , A__ : List[str]=False , A__ : List[Any]=True , A__ : Optional[Any]=True , A__ : List[str]="[CLS]" , A__ : Optional[Any]="[SEP]" , A__ : Dict="[UNK]" , A__ : int="[SEP]" , A__ : Optional[Any]="[PAD]" , A__ : Union[str, Any]="[CLS]" , A__ : Dict="[MASK]" , **A__ : Optional[int] , ) -> Tuple:
'''simple docstring'''
super().__init__(
do_lower_case=A__ , remove_space=A__ , keep_accents=A__ , bos_token=A__ , eos_token=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , **A__ , )
a__ : Any = do_lower_case
a__ : int = remove_space
a__ : Union[str, Any] = keep_accents
a__ : Union[str, Any] = vocab_file
a__ : str = spm.SentencePieceProcessor()
self.sp_model.Load(A__ )
@property
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
return len(self.sp_model )
def __lowerCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
a__ : Optional[Any] = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ) -> Tuple:
'''simple docstring'''
a__ : List[str] = self.__dict__.copy()
a__ : Optional[int] = None
return state
def __setstate__( self : Union[str, Any] , A__ : Tuple ) -> List[str]:
'''simple docstring'''
a__ : int = d
a__ : Any = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self : int , A__ : Optional[int] , A__ : str=False ) -> int:
'''simple docstring'''
a__ : Optional[int] = self.sp_model.EncodeAsPieces(A__ )
return pieces
def __lowerCAmelCase ( self : Any , A__ : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.sp_model.PieceToId(A__ )
def __lowerCAmelCase ( self : int , A__ : Tuple ) -> Tuple:
'''simple docstring'''
return self.sp_model.IdToPiece(A__ )
def __lowerCAmelCase ( self : str , A__ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
a__ : int = self.sp_model.decode_pieces(A__ )
return out_string
def __lowerCAmelCase ( self : int , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : int = [self.sep_token_id]
a__ : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCAmelCase ( self : List[Any] , A__ : List[int] , A__ : Optional[List[int]] = None , A__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A__ )) + [1] + ([0] * len(A__ )) + [1]
return [1] + ([0] * len(A__ )) + [1]
def __lowerCAmelCase ( self : Optional[Any] , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : Tuple = [self.sep_token_id]
a__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : List[Any] , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A__ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(A__ ) )
return
a__ : Optional[Any] = os.path.join(
A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ):
copyfile(self.vocab_file , A__ )
return (out_vocab_file,)
| 688 |
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH , _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class Direction ( enum.Enum ):
    """simple docstring"""
    UP = 0
    DOWN = 1
def __a ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict="" ):
sys.stdout.write(str(lowerCAmelCase__ ) + end )
sys.stdout.flush()
def __a ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : int="" ):
forceWrite(F'\u001b[{color}m{content}\u001b[0m' , lowerCAmelCase__ )
def __a ( ):
forceWrite('''\r''' )
def __a ( lowerCAmelCase__ : int , lowerCAmelCase__ : str ):
forceWrite(F'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def __a ( ):
forceWrite(''' ''' * TERMINAL_WIDTH )
reset_cursor()
def __a ( ):
reset_cursor()
forceWrite('''-''' * TERMINAL_WIDTH )
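# --- Illustrative usage sketch (added; not part of the original file) ---
# Paint a status message in ANSI green (SGR color code 32, a standard terminal
# assumption), then draw a separator line across the terminal.
if __name__ == "__main__":
    writeColor("OK", 32, end="\n")
    linebreak()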
| 688 | 1 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , lowerCAmelCase_ , )
class DeeRobertaModel( DeeBertModel ):
"""simple docstring"""
    config_class = RobertaConfig
    base_model_prefix = "roberta"
def __init__( self : str , A__ : List[str] ) -> Tuple:
'''simple docstring'''
super().__init__(A__ )
a__ : Union[str, Any] = RobertaEmbeddings(A__ )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " , lowerCAmelCase_ , )
class DeeRobertaForSequenceClassification( BertPreTrainedModel ):
"""simple docstring"""
    config_class = RobertaConfig
    base_model_prefix = "roberta"
def __init__( self : Optional[Any] , A__ : str ) -> int:
'''simple docstring'''
super().__init__(A__ )
a__ : List[str] = config.num_labels
a__ : Tuple = config.num_hidden_layers
a__ : Optional[Any] = DeeRobertaModel(A__ )
a__ : str = nn.Dropout(config.hidden_dropout_prob )
a__ : int = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(A__ )
def __lowerCAmelCase ( self : Optional[Any] , A__ : str=None , A__ : Dict=None , A__ : Tuple=None , A__ : Optional[Any]=None , A__ : Optional[Any]=None , A__ : Optional[Any]=None , A__ : List[str]=None , A__ : List[str]=-1 , A__ : Any=False , ) -> List[Any]:
'''simple docstring'''
a__ : Any = self.num_layers
try:
a__ : List[Any] = self.roberta(
A__ , attention_mask=A__ , token_type_ids=A__ , position_ids=A__ , head_mask=A__ , inputs_embeds=A__ , )
a__ : Tuple = outputs[1]
a__ : Union[str, Any] = self.dropout(A__ )
a__ : List[str] = self.classifier(A__ )
a__ : Optional[int] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
a__ : str = e.message
a__ : Optional[Any] = e.exit_layer
a__ : str = outputs[0]
if not self.training:
a__ : Tuple = entropy(A__ )
a__ : List[str] = []
a__ : Tuple = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
a__ : int = MSELoss()
a__ : Optional[int] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
a__ : str = CrossEntropyLoss()
a__ : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
a__ : List[str] = []
for highway_exit in outputs[-1]:
a__ : Optional[int] = highway_exit[0]
if not self.training:
highway_logits_all.append(A__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
a__ : Dict = MSELoss()
a__ : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
a__ : List[Any] = CrossEntropyLoss()
a__ : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(A__ )
if train_highway:
a__ : Optional[int] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
a__ : Any = (loss,) + outputs
if not self.training:
a__ : Tuple = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
a__ : Optional[Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 688 |
'''simple docstring'''
import inspect
import unittest
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def __lowerCAmelCase ( self : int ) -> str:
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
                        backend = '''k-diffusion'''
elif backend == "invisible_watermark":
                        backend = '''invisible-watermark'''
assert backend in deps, F'{backend} is not in the deps table!'
| 688 | 1 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys ( flax_key_tuple , flax_tensor ):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
    return flax_key_tuple, flax_tensor
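# Worked example (added for illustration): a 3-D expert kernel of shape
# (num_experts, d_in, d_out) is renamed to a "weight" and permuted with
# (0, 2, 1) into (num_experts, d_out, d_in), while a plain 2-D "kernel" is
# simply transposed; both match torch's Linear weight layout of (out, in).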
def get_key_and_tensorstore_dict ( layer , checkpoint_info , switch_checkpoint_path ):
    if "metadata" in layer:
        split_layer = layer.split('''metadata''' )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
    elif "kvstore" in layer:
        split_layer = layer.split('''kvstore''' )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
    else:
        split_layer = layer.split('''/''' )
        curr_real_layer_name = '''/'''.join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
    elif "kvstore/driver" in layer:
        content = '''file'''
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block ( current_block , save_path ):
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        # assumption: flax's '/'-separated keys become '.'-separated torch keys
        new_current_block[k.replace('''/''' , '''.''' )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly ( switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name : str = WEIGHTS_NAME ):
a__ : str = convert_file_size_to_int(lowerCAmelCase__ )
a__ : Optional[Any] = []
a__ : int = {}
a__ : Any = 0
a__ : List[str] = 0
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
a__ : Tuple = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
a__ : List[str] = flatten_dict(lowerCAmelCase__ , sep='''/''' )
a__ : List[Any] = {}
for layer in checkpoint_info.keys():
a__ , a__ , a__ : Dict = get_key_and_tensorstore_dict(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if curr_real_layer_name in all_layers:
a__ : Union[str, Any] = content
else:
a__ : Union[str, Any] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
a__ : Dict = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
a__ : str = torch.tensor(lowerCAmelCase__ )
a__ : int = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
a__ , a__ : Dict = rename_base_flax_keys(tuple(key.split('''/''' ) ) , lowerCAmelCase__ )
a__ : Any = '''/'''.join(lowerCAmelCase__ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
a__ : List[str] = os.path.join(
lowerCAmelCase__ , weights_name.replace('''.bin''' , F'-{len(lowerCAmelCase__ )+1:05d}-of-???.bin' ) )
rename_and_save_block(lowerCAmelCase__ , lowerCAmelCase__ )
sharded_state_dicts.append(current_block.keys() )
del current_block
a__ : Any = {}
a__ : List[Any] = 0
a__ : Dict = raw_weights.to(getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
a__ : int = os.path.join(lowerCAmelCase__ , weights_name.replace('''.bin''' , F'-{len(lowerCAmelCase__ )+1:05d}-of-???.bin' ) )
rename_and_save_block(lowerCAmelCase__ , lowerCAmelCase__ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowerCAmelCase__ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
a__ : List[Any] = {}
a__ : Union[str, Any] = {}
for idx, shard in enumerate(lowerCAmelCase__ ):
a__ : Any = weights_name.replace(
'''.bin''' , F'-{idx+1:05d}-of-{len(lowerCAmelCase__ ):05d}.bin' ) # len(sharded_state_dicts):05d}
a__ : List[Any] = os.path.join(lowerCAmelCase__ , weights_name.replace('''.bin''' , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) )
a__ : Tuple = shard
for key in shard:
a__ : List[Any] = shard_file
# Add the metadata
a__ : Dict = {'''total_size''': total_size}
a__ : str = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , '''w''' , encoding='''utf-8''' ) as f:
a__ : int = json.dumps(lowerCAmelCase__ , indent=2 , sort_keys=lowerCAmelCase__ ) + '''\n'''
f.write(lowerCAmelCase__ )
return metadata, index
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check ( ):
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
a__ : Tuple = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
a__ : List[str] = SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
a__ : Dict = TaTokenizer.from_pretrained('''t5-small''' )
a__ : Optional[Any] = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
a__ : Union[str, Any] = tokenizer(lowerCAmelCase__ , return_tensors='''pt''' ).input_ids
a__ : Dict = model.generate(lowerCAmelCase__ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 688 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess ( image ):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
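# Worked example (added for illustration): preprocess maps 8-bit pixel values
# in [0, 255] to tensors in [-1, 1]; 0 -> -1.0, 255 -> 1.0, and a mid-gray
# 127.5 -> 0.0, since 2.0 * (127.5 / 255.0) - 1.0 == 0.0.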
class LDMSuperResolutionPipeline( DiffusionPipeline ):
"""simple docstring"""
def __init__( self : Optional[Any] , A__ : VQModel , A__ : UNetaDModel , A__ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=A__ , unet=A__ , scheduler=A__ )
@torch.no_grad()
def __call__( self : List[str] , A__ : Union[torch.Tensor, PIL.Image.Image] = None , A__ : Optional[int] = 1 , A__ : Optional[int] = 1_0_0 , A__ : Optional[float] = 0.0 , A__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A__ : Optional[str] = "pil" , A__ : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
if isinstance(A__ , PIL.Image.Image ):
a__ : List[Any] = 1
elif isinstance(A__ , torch.Tensor ):
a__ : List[str] = image.shape[0]
else:
raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(A__ )}' )
if isinstance(A__ , PIL.Image.Image ):
a__ : Union[str, Any] = preprocess(A__ )
a__ , a__ : Dict = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
a__ : Optional[int] = (batch_size, self.unet.config.in_channels // 2, height, width)
a__ : Optional[int] = next(self.unet.parameters() ).dtype
a__ : List[str] = randn_tensor(A__ , generator=A__ , device=self.device , dtype=A__ )
a__ : Any = image.to(device=self.device , dtype=A__ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(A__ , device=self.device )
a__ : int = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
a__ : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
a__ : Union[str, Any] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
a__ : str = {}
if accepts_eta:
a__ : Dict = eta
for t in self.progress_bar(A__ ):
# concat latents and low resolution image in the channel dimension.
a__ : str = torch.cat([latents, image] , dim=1 )
a__ : Optional[Any] = self.scheduler.scale_model_input(A__ , A__ )
# predict the noise residual
a__ : Union[str, Any] = self.unet(A__ , A__ ).sample
# compute the previous noisy sample x_t -> x_t-1
a__ : Union[str, Any] = self.scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
# decode the image latents with the VQVAE
a__ : List[Any] = self.vqvae.decode(A__ ).sample
a__ : List[Any] = torch.clamp(A__ , -1.0 , 1.0 )
a__ : Optional[Any] = image / 2 + 0.5
a__ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
a__ : Union[str, Any] = self.numpy_to_pil(A__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A__ )
| 688 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'sentencepiece.bpe.model'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
__SCREAMING_SNAKE_CASE = {
'moussaKam/mbarthez': 1_0_2_4,
'moussaKam/barthez': 1_0_2_4,
'moussaKam/barthez-orangesum-title': 1_0_2_4,
}
__SCREAMING_SNAKE_CASE = '▁'
class BarthezTokenizer( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] , A__ : Optional[int] , A__ : Dict="<s>" , A__ : Any="</s>" , A__ : Dict="</s>" , A__ : Any="<s>" , A__ : Any="<unk>" , A__ : Optional[Any]="<pad>" , A__ : int="<mask>" , A__ : Optional[Dict[str, Any]] = None , **A__ : List[Any] , ) -> None:
'''simple docstring'''
a__ : Optional[Any] = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else mask_token
a__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A__ , eos_token=A__ , unk_token=A__ , sep_token=A__ , cls_token=A__ , pad_token=A__ , mask_token=A__ , sp_model_kwargs=self.sp_model_kwargs , **A__ , )
a__ : Union[str, Any] = vocab_file
a__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A__ ) )
a__ : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
a__ : List[Any] = len(self.sp_model ) - 1
a__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __lowerCAmelCase ( self : List[str] , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a__ : Tuple = [self.cls_token_id]
a__ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.sp_model)

    def get_vocab(self) -> Dict:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
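# Minimal usage sketch (assumption: this class corresponds to transformers'
# BarthezTokenizer; the checkpoint name comes from the maps above):
# tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
# tokenizer("Le chat dort.")  # -> {"input_ids": [...], "attention_mask": [...]}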
| 688 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> import torch\n    >>> import numpy as np\n\n    >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n    >>> from transformers import pipeline\n    >>> from diffusers.utils import load_image\n\n\n    >>> def make_hint(image, depth_estimator):\n    ...     image = depth_estimator(image)["depth"]\n    ...     image = np.array(image)\n    ...     image = image[:, :, None]\n    ...     image = np.concatenate([image, image, image], axis=2)\n    ...     detected_map = torch.from_numpy(image).float() / 255.0\n    ...     hint = detected_map.permute(2, 0, 1)\n    ...     return hint\n\n\n    >>> depth_estimator = pipeline("depth-estimation")\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior = pipe_prior.to("cuda")\n\n    >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n    ... )\n    >>> pipe = pipe.to("cuda")\n\n\n    >>> img = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/cat.png"\n    ... ).resize((768, 768))\n\n    >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n    >>> prompt = "A robot, 4k photo"\n    >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n    >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n    >>> image_emb, zero_image_emb = pipe_prior(\n    ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n    ... ).to_tuple()\n\n    >>> images = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     hint=hint,\n    ...     num_inference_steps=50,\n    ...     generator=generator,\n    ...     height=768,\n    ...     width=768,\n    ... ).images\n\n    >>> images[0].save("robot_cat.png")\n    ```\n'
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
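# Hedged examples for the helper above (default scale_factor=8, so pixel sizes
# are rounded up to multiples of 64 before mapping to latent-space dimensions):
# downscale_height_and_width(768, 768)  -> (96, 96)    (768 // 64 * 8)
# downscale_height_and_width(770, 770)  -> (104, 104)  (rounded up to 13 * 8)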
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """simple docstring"""
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        '''simple docstring'''
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        '''simple docstring'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        device = torch.device(F'cuda:{gpu_id}' )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        '''simple docstring'''
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        device = torch.device(F'cuda:{gpu_id}' )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        '''simple docstring'''
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], hint: torch.FloatTensor, height: int = 5_1_2, width: int = 5_1_2, num_inference_steps: int = 1_0_0, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ):
        '''simple docstring'''
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'''image_embeds''': image_embeds, '''hint''': hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 688 | 1 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger()
@dataclass
class Tracker:
    """simple docstring"""
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        '''simple docstring'''
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        '''simple docstring'''
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        '''simple docstring'''
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """simple docstring"""
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: list = field(default_factory=list)
    dest_skip: list = field(default_factory=list)
    raise_if_mismatch: bool = True
    def __call__(self, x: Tensor):
        '''simple docstring'''
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                F'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                F' destination module has {len(dest_traced)}.' )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(F'Transfered from={src_m} to={dest_m}' )
class FakeRegNetVisslWrapper(nn.Module):
    """simple docstring"""
    def __init__(self, model: nn.Module):
        '''simple docstring'''
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(('''conv1''', model.stem) )
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith('''block''' ), F'Unexpected layer name {k}'
            block_index = len(feature_blocks) + 1
            feature_blocks.append((F'res{block_index}', v) )
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        '''simple docstring'''
        return get_trunk_forward_outputs(
            x, out_feat_keys=None, feature_blocks=self._feature_blocks, )
class NameToFromModelFuncMap(dict):
    """simple docstring"""
    def convert_name_to_timm(self, x: str) -> str:
        '''simple docstring'''
        x_split = x.split('''-''' )
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
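    # Hedged example: convert_name_to_timm("regnet-x-002") -> "regnetx_002",
    # turning the hyphenated HF-style name into a timm model identifier.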
    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        '''simple docstring'''
        # default to timm if the name is not registered explicitly
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None) )
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    """simple docstring"""
    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        '''simple docstring'''
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(F'Copied key={from_key} to={to_key}' )
    return to_state_dict
def convert_weight_and_push(name: str, from_model_func: Callable[[], nn.Module], our_model_func: Callable[[], nn.Module], config: RegNetConfig, save_directory: Path, push_to_hub: bool = True, ):
    print(F'Converting {name}...' )
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x)
    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)
    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=True , )
        size = 224 if '''seer''' not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=size )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=True , )
        print(F'Pushed {name}' )
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    id2label = id2label
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location='''cpu''' )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files['''classy_state_dict''']['''base_model''']['''model''']
        state_dict = model_state_dict['''trunk''']
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision, '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision, '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision, '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision, '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_0=620.83 , w_m=2.52 ) ) ) , )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision, '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision, '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision, '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision, '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_0=620.83 , w_m=2.52 ) ) ) , )
    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub, )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub, )
return config, expected_shape
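# Hedged CLI sketch (the script/file name and paths are illustrative only):
#   python convert_regnet_to_pytorch.py --model_name regnet-y-040 \
#       --pytorch_dump_folder_path ./converted-regnet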
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 688 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/esm2_t6_8M_UR50D': 1_0_2_4,
'facebook/esm2_t12_35M_UR50D': 1_0_2_4,
}
def load_vocab_file(vocab_file):
    with open(vocab_file , '''r''' ) as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]
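# Hedged example: for a vocab.txt with one token per line, e.g.
#   <cls>\n<pad>\n<eos>\n<unk>\nL\nA\nG\n...
# load_vocab_file returns ["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", ...].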
class EsmTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
    def _convert_id_to_token(self, index: int) -> str:
        '''simple docstring'''
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        '''simple docstring'''
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        '''simple docstring'''
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        '''simple docstring'''
        return len(self._id_to_token)

    def get_vocab(self):
        '''simple docstring'''
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        '''simple docstring'''
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        '''simple docstring'''
        return self._id_to_token.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
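    # Hedged illustration (ids depend on the vocab file): a single sequence is
    # wrapped as <cls> tokens <eos>; a pair becomes <cls> A <eos> B <eos>, with
    # <eos> standing in for the separator token ESM's vocabulary lacks.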
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask
    def save_vocabulary(self, save_directory, filename_prefix):
        '''simple docstring'''
        vocab_file = os.path.join(save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
        with open(vocab_file , '''w''' ) as f:
            f.write('''\n'''.join(self.all_tokens ) )
        return (vocab_file,)
    @property
    def vocab_size(self) -> int:
        '''simple docstring'''
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        '''simple docstring'''
        return super()._add_tokens(new_tokens, special_tokens=True)
| 688 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=1_8, min_resolution=3_0, max_resolution=4_0_0, do_resize=True, size=None, apply_ocr=True, ):
        '''simple docstring'''
        size = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self):
        '''simple docstring'''
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)
@property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''apply_ocr''' ) )
    def test_image_processor_from_dict_with_kwargs(self):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
        self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} )
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
'''simple docstring'''
pass
    def test_call_pil(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def test_call_numpy(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def test_call_pytorch(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
'''simple docstring'''
a__ : Any = LayoutLMvaImageProcessor()
from datasets import load_dataset
a__ : int = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
a__ : str = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
a__ : Any = image_processing(A__ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a__ : str = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
a__ : List[str] = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 
5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A__ )
self.assertListEqual(encoding.boxes , A__ )
# with apply_OCR = False
a__ : Dict = LayoutLMvaImageProcessor(apply_ocr=A__ )
a__ : List[Any] = image_processing(A__ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
| 688 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    """simple docstring"""
    def __init__(self):
        '''simple docstring'''
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        '''simple docstring'''
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
            self.initialized = True

    def init_retrieval(self):
        '''simple docstring'''
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
"""simple docstring"""
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        '''simple docstring'''
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                '''When using Ray for distributed fine-tuning, '''
                '''you\'ll need to provide the paths instead, '''
                '''as the dataset and the index are loaded '''
                '''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers ) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ] )
    def init_retrieval(self):
        '''simple docstring'''
        logger.info('''initializing retrieval''' )
        if len(self.retrieval_workers ) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        if len(self.retrieval_workers ) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs) )
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        '''simple docstring'''
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        '''simple docstring'''
        config = kwargs.pop('''config''' , None ) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = '''custom'''
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index, )
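# Hedged usage sketch (hypothetical actor setup; assumes a running Ray cluster):
# import ray
# workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
# retriever = RagRayDistributedRetriever.from_pretrained(
#     "facebook/rag-token-nq", actor_handles=workers
# )
# retriever.init_retrieval()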
| 688 | 1 |
'''simple docstring'''
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    if not isinstance(number, int):
        msg = F'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 1:
        raise ValueError('''Input must be a positive integer''' )
    return -1 if len(prime_factors(number) ) % 2 else 1
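# Hedged examples for this implementation: mobius(2) -> -1, mobius(6) -> 1.
# Caveat: the classical Mobius function is 0 for non-square-free inputs
# (mu(4) = 0), but this variant only checks the parity of the prime-factor
# count, so mobius(4) returns 1 here.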
if __name__ == "__main__":
import doctest
doctest.testmod()
| 688 |
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
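# Hedged example (the points lie on y = x + 5, so the interpolant is linear):
# neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]  -> 10.0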
if __name__ == "__main__":
import doctest
doctest.testmod()
| 688 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import random
class lowerCAmelCase__ :
"""simple docstring"""
    def __init__(self, value: int | None = None):
        '''simple docstring'''
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : Dict ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F'\'{self.value}: {self.prior:.5}\''
else:
return pformat(
{F'{self.value}: {self.prior:.5}': (self.left, self.right)} , indent=1 )
    def __str__(self) -> str:
        '''simple docstring'''
        value = str(self.value ) + ''' '''
        left = str(self.left or '''''' )
        right = str(self.right or '''''' )
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)

def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value , end=''',''' )
        inorder(root.right)
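# Hedged usage sketch (priorities are random, but the in-order walk is stable):
# root = None
# for v in (5, 3, 8):
#     root = insert(root, v)
# inorder(root)          # expected to print: 3,5,8,
# root = erase(root, 5)
# inorder(root)          # expected to print: 3,8,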
def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root, int(arg[1:] ) )
        else:
            print('''Unknown command''' )
    return root
def main() -> None:
    root = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''' )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print('''good by!''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 688 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class Swin2SRConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=6_4, patch_size=1, num_channels=3, embed_dim=1_8_0, depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1E-5, upscale=2, img_range=1.0, resi_connection="1conv", upsampler="pixelshuffle", **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
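# Minimal usage sketch (assumption: this class mirrors transformers' Swin2SRConfig):
# config = Swin2SRConfig(upscale=4)
# config.model_type  # -> "swin2sr"
# config.num_layers  # -> 6, i.e. len(config.depths)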
| 688 | 1 |
'''simple docstring'''
def __a ( lowerCAmelCase__ : str = "The quick brown fox jumps over the lazy dog" , ):
a__ : int = set()
# Replace all the whitespace in our sentence
a__ : Any = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(lowerCAmelCase__ ) == 26
def __a ( lowerCAmelCase__ : str = "The quick brown fox jumps over the lazy dog" , ):
a__ : Union[str, Any] = [False] * 26
for char in input_str:
if char.islower():
a__ : Tuple = True
elif char.isupper():
a__ : str = True
return all(lowerCAmelCase__ )
def __a ( lowerCAmelCase__ : str = "The quick brown fox jumps over the lazy dog" , ):
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
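# Hedged examples: all three variants agree on the same inputs, e.g.
# is_pangram()               -> True   (the default sentence uses every letter)
# is_pangram("hello world")  -> False  (most letters never appear)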
def benchmark() -> None:
    from timeit import timeit
    setup = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
    print(timeit('''is_pangram()''' , setup=setup ) )
    print(timeit('''is_pangram_faster()''' , setup=setup ) )
    print(timeit('''is_pangram_fastest()''' , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
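# --- quick check (illustrative only):
# is_pangram()                   # -> True, the default sentence covers all 26 letters
# is_pangram("hello world")      # -> False, only 7 distinct letters
# is_pangram_fastest("abc" * 9)  # -> False, repeats add no new letters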
| 688 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self ) -> None:
        '''simple docstring'''
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_image_processor_from_model_shortcut(self ) -> Tuple:
        '''simple docstring'''
        image_processor = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(image_processor , CLIPImageProcessor )
def __lowerCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : List[Any] = Path(A__ ) / '''preprocessor_config.json'''
a__ : List[Any] = Path(A__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Any = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : int = Path(A__ ) / '''preprocessor_config.json'''
a__ : Optional[Any] = Path(A__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Dict = CLIPConfig()
            # Create a dummy config file with image_processor_type
a__ : int = Path(A__ ) / '''preprocessor_config.json'''
a__ : Optional[int] = Path(A__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
a__ : List[Any] = AutoImageProcessor.from_pretrained(A__ ).to_dict()
config_dict.pop('''image_processor_type''' )
a__ : Union[str, Any] = CLIPImageProcessor(**A__ )
# save in new folder
model_config.save_pretrained(A__ )
config.save_pretrained(A__ )
a__ : Union[str, Any] = AutoImageProcessor.from_pretrained(A__ )
# make sure private variable is not incorrectly saved
a__ : Optional[Any] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Optional[int] = Path(A__ ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
a__ : Any = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
    def test_repo_not_found(self ) -> Optional[Any]:
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , '''clip-base is not a local folder and is not a valid model identifier''' ):
            image_processor = AutoImageProcessor.from_pretrained('''clip-base''' )
    def test_revision_not_found(self ) -> int:
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            image_processor = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='''aaaaaa''' )
    def test_image_processor_not_found(self ) -> List[Any]:
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
with self.assertRaises(A__ ):
a__ : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A__ ):
a__ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
a__ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A__ )
a__ : str = AutoImageProcessor.from_pretrained(A__ , trust_remote_code=A__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , A__ )
AutoImageProcessor.register(A__ , A__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A__ ):
AutoImageProcessor.register(A__ , A__ )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Optional[int] = Path(A__ ) / '''preprocessor_config.json'''
a__ : List[str] = Path(A__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Tuple = CustomImageProcessor.from_pretrained(A__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A__ )
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self ) -> List[str]:
        '''simple docstring'''
        class NewImageProcessor(CLIPImageProcessor ):
            """simple docstring"""
            is_local = True
        try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , NewImageProcessor )
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=False )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=True )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(image_processor , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
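# --- minimal registration sketch (illustrative; mirrors the pattern exercised
# --- above, reusing CustomConfig/CustomImageProcessor from the test modules;
# --- `saved_dir` is a hypothetical path holding a saved custom processor):
# AutoConfig.register("custom", CustomConfig)
# AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
# processor = AutoImageProcessor.from_pretrained(saved_dir)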
| 688 | 1 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__SCREAMING_SNAKE_CASE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self ) -> None:
        '''simple docstring'''
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
    def tearDown(self ) -> None:
        '''simple docstring'''
        check_copies.DIFFUSERS_PATH = '''src/diffusers'''
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency(self , comment , class_name , class_code , overwrite_result=None ) -> None:
        '''simple docstring'''
        code = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
        if overwrite_result is not None:
            expected = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=1_1_9 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , '''new_code.py''' )
        with open(fname , '''w''' , newline='''\n''' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , '''r''' ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_diffusers(self ) -> None:
        '''simple docstring'''
        code = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
        self.assertEqual(code , REFERENCE_CODE )
    def test_is_copy_consistent(self ) -> None:
        '''simple docstring'''
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , F'{long_class_name}SchedulerOutput' , re.sub('''Bert''' , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , REFERENCE_CODE , overwrite_result=re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE ) , )
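# --- what the checker enforces (illustrative): a class body annotated with
# --- "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test"
# --- must stay identical to the source after applying the DDPM->Test rename;
# --- otherwise check_copies.is_copy_consistent reports the drift, or rewrites
# --- the block in place when called with overwrite=True.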
| 688 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__SCREAMING_SNAKE_CASE = get_logger(__name__)
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCamelCase = "dummy_data"
__UpperCamelCase = "datasets"
__UpperCamelCase = False
    def __init__( self , dataset_name : str , config : str , version : Union[Version, str] , cache_dir : Optional[str] = None , use_local_dummy_data : bool = False , load_existing_dummy_data : bool = True , download_callbacks : Optional[List[Callable]] = None , ) -> None:
        '''simple docstring'''
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version )
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self ) -> str:
        '''simple docstring'''
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
    @property
    def dummy_data_folder(self ) -> str:
        '''simple docstring'''
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('''dummy''' , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join('''dummy''' , self.version_name )
    @property
    def dummy_zip_file(self ) -> str:
        '''simple docstring'''
        return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
    def download_dummy_data(self ) -> str:
        '''simple docstring'''
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir , cache_dir=self.cache_dir , extract_compressed_file=True , force_extract=True )
        return os.path.join(local_path , self.dummy_file_name )
    @property
    def local_path_to_dummy_data(self ) -> str:
        '''simple docstring'''
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
    @property
    def github_path_to_dummy_data(self ) -> str:
        '''simple docstring'''
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
        return self._bucket_url
    @property
    def manual_dir(self ) -> str:
        '''simple docstring'''
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
    def download_and_extract(self , data_url , *args ) -> Union[str, Any]:
        '''simple docstring'''
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url , dict ):
            return self.create_dummy_data_dict(dummy_file , data_url )
        elif isinstance(data_url , (list, tuple) ):
            return self.create_dummy_data_list(dummy_file , data_url )
        else:
            return self.create_dummy_data_single(dummy_file , data_url )
    def download(self , data_url , *args ) -> Any:
        '''simple docstring'''
        return self.download_and_extract(data_url )
    def download_custom(self , data_url , custom_download ) -> Any:
        '''simple docstring'''
        return self.download_and_extract(data_url )
    def extract(self , path , *args , **kwargs ) -> Any:
        '''simple docstring'''
        return path
    def get_recorded_sizes_checksums(self ) -> dict:
        '''simple docstring'''
        return {}
    def create_dummy_data_dict(self , path_to_dummy_data , data_url ) -> dict:
        '''simple docstring'''
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls , list ):
                    for single_url in single_urls:
                        download_callback(single_url )
                else:
                    single_url = single_urls
                    download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls , list ):
                value = [os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(single_url ).name ) )
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i , str ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self , path_to_dummy_data , data_url ) -> list:
        '''simple docstring'''
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , url ) ) for url in data_url )
        is_pubmed_records = all(
            url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
            dummy_data_list.append(value )
        return dummy_data_list
    def create_dummy_data_single(self , path_to_dummy_data , data_url ) -> str:
        '''simple docstring'''
        for download_callback in self.download_callbacks:
            download_callback(data_url )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
        if os.path.exists(value ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self ) -> None:
        '''simple docstring'''
        pass
    def manage_extracted_files(self ) -> None:
        '''simple docstring'''
        pass
    def iter_archive(self , path ) -> Any:
        '''simple docstring'''
        def _iter_archive_members(path ):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file ).parent
            relative_path = path.relative_to(dummy_parent_path )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(member )
        path = Path(path )
        file_paths = _iter_archive_members(path ) if self.use_local_dummy_data else path.rglob('''*''' )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
                yield file_path.relative_to(path ).as_posix(), file_path.open('''rb''' )
    def iter_files(self , paths ) -> Any:
        '''simple docstring'''
        if not isinstance(paths , list ):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path ):
                if os.path.basename(path ).startswith(('''.''', '''__''') ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path ):
                    if os.path.basename(dirpath ).startswith(('''.''', '''__''') ):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames ):
                        if filename.startswith(('''.''', '''__''') ):
                            continue
                        yield os.path.join(dirpath , filename )
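# --- mapping sketch (illustrative only): how a dict of URLs is rewritten into
# --- paths inside the local dummy_data folder by create_dummy_data_dict.
# dl_manager.download_and_extract({"train": "https://host/data/train.csv"})
# # -> {"train": "<dummy_file>/train.csv"} when existing dummy data is loaded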
| 688 | 1 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class lowerCAmelCase__ ( unittest.TestCase , ToolTesterMixin ):
    """simple docstring"""
    def setUp(self ) -> None:
        '''simple docstring'''
        self.tool = load_tool('''text-to-speech''' )
        self.tool.setup()
    def test_exact_match_arg(self ) -> None:
        '''simple docstring'''
        torch.manual_seed(0 )
        result = self.tool('''hey''' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
    def test_exact_match_kwarg(self ) -> None:
        '''simple docstring'''
        torch.manual_seed(0 )
        result = self.tool('''hey''' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 688 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self ) -> None:
        '''simple docstring'''
        super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_input_output_texts(self , tokenizer ) -> tuple:
        '''simple docstring'''
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer(self ) -> None:
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 1_0, 8, 9] )
    def test_rust_and_python_full_tokenizers(self ) -> None:
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
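# --- wordpiece sketch (illustrative, using the toy vocab written in setUp):
# tokenizer = LxmertTokenizer(vocab_file)
# tokenizer.tokenize("unwanted running")
# # -> ["un", "##want", "##ed", "runn", "##ing"]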
| 688 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Dict ) -> Any:
'''simple docstring'''
a__ : Dict = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
a__ : List[Any] = dict(zip(A__ , range(len(A__ ) ) ) )
a__ : List[Any] = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
a__ : Dict = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_6_0_0_0,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
a__ : Dict = tempfile.mkdtemp()
a__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a__ : Optional[Any] = os.path.join(self.tmpdirname , A__ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A__ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A__ ) + '''\n''' )
# load decoder from hub
a__ : List[str] = '''hf-internal-testing/ngram-beam-search-decoder'''
def __lowerCAmelCase ( self : Optional[int] , **A__ : Any ) -> List[Any]:
'''simple docstring'''
a__ : List[str] = self.add_kwargs_tokens_map.copy()
kwargs.update(A__ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A__ )
def __lowerCAmelCase ( self : str , **A__ : Tuple ) -> int:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A__ )
def __lowerCAmelCase ( self : Any , **A__ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A__ )
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
a__ : Optional[Any] = self.get_tokenizer()
a__ : Dict = self.get_feature_extractor()
a__ : Optional[Any] = self.get_decoder()
a__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=A__ , feature_extractor=A__ , decoder=A__ )
processor.save_pretrained(self.tmpdirname )
a__ : Any = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A__ )
def __lowerCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
a__ : Optional[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
a__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(A__ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=A__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __lowerCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
a__ : Optional[int] = self.get_feature_extractor()
a__ : Any = self.get_tokenizer()
a__ : Tuple = self.get_decoder()
a__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=A__ , feature_extractor=A__ , decoder=A__ )
a__ : List[str] = floats_list((3, 1_0_0_0) )
a__ : List[str] = feature_extractor(A__ , return_tensors='''np''' )
a__ : Dict = processor(A__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[Any] = self.get_feature_extractor()
a__ : List[Any] = self.get_tokenizer()
a__ : Any = self.get_decoder()
a__ : int = WavaVecaProcessorWithLM(tokenizer=A__ , feature_extractor=A__ , decoder=A__ )
a__ : Optional[Any] = '''This is a test string'''
a__ : Optional[int] = processor(text=A__ )
a__ : Optional[int] = tokenizer(A__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self : List[Any] , A__ : int=(2, 1_0, 1_6) , A__ : Union[str, Any]=7_7 ) -> int:
'''simple docstring'''
np.random.seed(A__ )
return np.random.rand(*A__ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[int] = self.get_feature_extractor()
a__ : int = self.get_tokenizer()
a__ : Optional[int] = self.get_decoder()
a__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=A__ , feature_extractor=A__ , decoder=A__ )
a__ : List[str] = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
a__ : List[Any] = processor.decode(A__ )
a__ : Union[str, Any] = decoder.decode_beams(A__ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
    def test_decoder_batch(self , pool_context ) -> None:
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        logits = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits )
        else:
            with get_context(pool_context ).Pool() as pool:
                decoded_processor = processor.batch_decode(logits , pool )
        logits_list = list(logits )
        with get_context('''fork''' ).Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p , logits_list )
        texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(texts_decoder , decoded_processor.text )
        self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
        self.assertListEqual(logit_scores_decoder , decoded_processor.logit_score )
        self.assertListEqual(lm_scores_decoder , decoded_processor.lm_score )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
a__ : Dict = self.get_feature_extractor()
a__ : List[str] = self.get_tokenizer()
a__ : Tuple = self.get_decoder()
a__ : Any = WavaVecaProcessorWithLM(tokenizer=A__ , feature_extractor=A__ , decoder=A__ )
a__ : Optional[Any] = self._get_dummy_logits()
a__ : Dict = 1_5
a__ : Optional[Any] = -20.0
a__ : Tuple = -4.0
a__ : str = processor.batch_decode(
A__ , beam_width=A__ , beam_prune_logp=A__ , token_min_logp=A__ , )
a__ : List[Any] = decoded_processor_out.text
a__ : int = list(A__ )
with get_context('''fork''' ).Pool() as pool:
a__ : Tuple = decoder.decode_beams_batch(
A__ , A__ , beam_width=A__ , beam_prune_logp=A__ , token_min_logp=A__ , )
a__ : List[Any] = [d[0][0] for d in decoded_decoder_out]
a__ : str = [d[0][2] for d in decoded_decoder_out]
a__ : str = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A__ , A__ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , A__ )
self.assertTrue(np.array_equal(A__ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , A__ , atol=1E-3 ) )
self.assertTrue(np.array_equal(A__ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , A__ , atol=1E-3 ) )
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
a__ : List[Any] = self.get_feature_extractor()
a__ : List[Any] = self.get_tokenizer()
a__ : str = self.get_decoder()
a__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A__ , feature_extractor=A__ , decoder=A__ )
a__ : List[str] = self._get_dummy_logits()
a__ : Tuple = 2.0
a__ : Optional[Any] = 5.0
a__ : Optional[int] = -20.0
a__ : Tuple = True
a__ : int = processor.batch_decode(
A__ , alpha=A__ , beta=A__ , unk_score_offset=A__ , lm_score_boundary=A__ , )
a__ : Any = decoded_processor_out.text
a__ : Dict = list(A__ )
decoder.reset_params(
alpha=A__ , beta=A__ , unk_score_offset=A__ , lm_score_boundary=A__ , )
with get_context('''fork''' ).Pool() as pool:
a__ : Optional[int] = decoder.decode_beams_batch(
A__ , A__ , )
a__ : str = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A__ , A__ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , A__ )
a__ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , A__ )
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
a__ : str = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
a__ : str = processor.decoder.model_container[processor.decoder._model_key]
a__ : int = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
a__ : Tuple = os.listdir(A__ )
a__ : List[str] = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A__ , A__ )
def __lowerCAmelCase ( self : Any ) -> int:
'''simple docstring'''
a__ : List[Any] = snapshot_download('''hf-internal-testing/processor_with_lm''' )
a__ : List[Any] = WavaVecaProcessorWithLM.from_pretrained(A__ )
a__ : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
a__ : Optional[int] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
a__ : List[Any] = os.listdir(A__ )
a__ : int = os.listdir(A__ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(A__ , A__ )
def __lowerCAmelCase ( self : Any ) -> Tuple:
'''simple docstring'''
a__ : str = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
a__ : Tuple = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
a__ : Optional[int] = floats_list((3, 1_0_0_0) )
a__ : Any = processor_wavaveca(A__ , return_tensors='''np''' )
a__ : List[Any] = processor_auto(A__ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
a__ : int = self._get_dummy_logits()
a__ : Optional[int] = processor_wavaveca.batch_decode(A__ )
a__ : List[Any] = processor_auto.batch_decode(A__ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __lowerCAmelCase ( self : List[str] ) -> Any:
'''simple docstring'''
a__ : Tuple = self.get_feature_extractor()
a__ : Optional[int] = self.get_tokenizer()
a__ : List[str] = self.get_decoder()
a__ : Any = WavaVecaProcessorWithLM(tokenizer=A__ , feature_extractor=A__ , decoder=A__ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def __lowerCAmelCase ( A__ : List[str] , A__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = [d[key] for d in offsets]
return retrieved_list
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
a__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
a__ : List[str] = self._get_dummy_logits()[0]
a__ : Optional[Any] = processor.decode(A__ , output_word_offsets=A__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A__ , A__ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __lowerCAmelCase ( self : Optional[int] ) -> Any:
'''simple docstring'''
a__ : Any = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
a__ : Optional[Any] = self._get_dummy_logits()
a__ : str = processor.batch_decode(A__ , output_word_offsets=A__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A__ , A__ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(A__ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __lowerCAmelCase ( self : str ) -> Optional[int]:
'''simple docstring'''
import torch
a__ : Optional[int] = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=A__ )
a__ : Dict = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
a__ : str = iter(A__ )
a__ : Any = next(A__ )
a__ : Dict = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
a__ : Optional[Any] = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
a__ : Optional[Any] = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
a__ : Optional[Any] = model(A__ ).logits.cpu().numpy()
a__ : Tuple = processor.decode(logits[0] , output_word_offsets=A__ )
a__ : Optional[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
a__ : Optional[int] = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
a__ : Optional[Any] = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(A__ , '''word''' ) ) , A__ )
self.assertEqual(''' '''.join(self.get_from_offsets(A__ , '''word''' ) ) , output.text )
# output times
a__ : List[Any] = torch.tensor(self.get_from_offsets(A__ , '''start_time''' ) )
a__ : Optional[Any] = torch.tensor(self.get_from_offsets(A__ , '''end_time''' ) )
# fmt: off
a__ : List[str] = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
a__ : Union[str, Any] = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(A__ , A__ , atol=0.01 ) )
self.assertTrue(torch.allclose(A__ , A__ , atol=0.01 ) )
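# --- offsets-to-seconds sketch (illustrative; mirrors the computation above):
# time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
# start_s = offset["start_offset"] * time_offset   # logits-frame index -> seconds
# end_s = offset["end_offset"] * time_offset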
| 688 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task : str , reset_position_index_per_cell : bool , tf_checkpoint_path : str , tapas_config_file : str , pytorch_dump_path : str ):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(F'Task {task} not supported.' )
    print(F'Building PyTorch model from configuration: {config}' )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(F'Save tokenizer files to {pytorch_dump_path}' )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''' , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
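# Example invocation (illustrative; all paths are hypothetical placeholders):
# python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#     --task WTQ \
#     --reset_position_index_per_cell \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --tapas_config_file /path/to/tapas_config.json \
#     --pytorch_dump_path /path/to/output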
| 688 | 1 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance( x : float , y : float , max_step : int ) -> float:
    a = x
    b = y
    for step in range(max_step ): # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb( distance : float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb( distance : float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image( image_width : int = 800 , image_height : int = 600 , figure_center_x : float = -0.6 , figure_center_y : float = 0 , figure_width : float = 3.2 , max_step : int = 50 , use_distance_color_coding : bool = True , ):
    img = Image.new('''RGB''' , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
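# --- worked example (illustrative):
# get_distance(0, 0, 50)  # -> 1.0: the origin never escapes, so step == 49
# get_distance(1, 1, 50)  # -> 0.0: (1, 1) exceeds a*a + b*b > 4 on the first step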
| 688 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
__SCREAMING_SNAKE_CASE = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/fnet-base': 5_1_2,
'google/fnet-large': 5_1_2,
}
SPIECE_UNDERLINE = '▁'
class lowerCAmelCase__ ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=True , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ) -> None:
        '''simple docstring'''
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary(self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
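# --- token_type_ids sketch (illustrative): a sentence pair is laid out as
# --- [CLS] A [SEP] B [SEP], with segment id 0 for the first block, 1 for the rest.
# tokenizer.create_token_type_ids_from_sequences([5, 6], [7])
# # -> [0, 0, 0, 0, 1, 1]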
| 688 | 1 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowerCAmelCase__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = "esm"
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_0_2_6 , initializer_range=0.02 , layer_norm_eps=1E-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''' )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , False ):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
    def to_dict(self ) -> dict:
        '''simple docstring'''
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output['''esmfold_config'''] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """simple docstring"""
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__(self ) -> None:
        '''simple docstring'''
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict(self ) -> dict:
        '''simple docstring'''
        output = asdict(self )
        output['''trunk'''] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """simple docstring"""
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__(self ) -> None:
        '''simple docstring'''
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F'`max_recycles` should be positive, got {self.max_recycles}.' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                F' {self.sequence_state_dim} and {self.sequence_head_width}.' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                F' {self.pairwise_state_dim} and {self.pairwise_head_width}.' )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
                F' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
                F' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
        if self.dropout >= 0.4:
            raise ValueError(F'`dropout` should not be greater than 0.4, got {self.dropout}.' )
    def to_dict(self ) -> dict:
        '''simple docstring'''
        output = asdict(self )
        output['''structure_module'''] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
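
# Illustrative sketch (added; not part of the original configuration module):
# TrunkConfig validates that the state dims divide evenly into attention heads.
#
#   >>> cfg = TrunkConfig(sequence_state_dim=1024, sequence_head_width=32)
#   >>> cfg.sequence_state_dim // cfg.sequence_head_width
#   32
#   >>> TrunkConfig(dropout=0.5)  # raises ValueError: `dropout` should not be greater than 0.4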
| 688 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Fast DistilBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the options passed to this wrapper.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Creates token type IDs: 0 for the first sequence, 1 for the optional second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
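
# Illustrative sketch (added; not part of the original module): how the
# special-token helpers compose single- vs. pair-sequence inputs. The raw IDs
# below are hypothetical.
#
#   tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   tok.build_inputs_with_special_tokens([10, 11], [20, 21])
#   # -> [CLS] 10 11 [SEP] 20 21 [SEP]
#   tok.create_token_type_ids_from_sequences([10, 11], [20, 21])
#   # -> [0, 0, 0, 0, 1, 1, 1]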
| 688 | 1 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        """Assert that the created tokens are the same as the hard-coded ones."""
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(INPUT_SENTENCES, decoded_tokens)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_encodings_from_xnli_dataset(self):
        """Tests the tokenizer downstream with an XNLI dataset."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 688 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Heuristic for the path: Manhattan or Euclidean distance to the goal."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
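
# Worked example (added note): with goal (6, 6), a node at (y=3, x=4) has
# dx = 4 - 6 = -2 and dy = 3 - 6 = -3, so the Euclidean heuristic (HEURISTIC == 0,
# the active setting above) gives sqrt((-3)**2 + (-2)**2) ~ 3.61, while the
# Manhattan variant (HEURISTIC == 1) would give abs(-2) + abs(-3) = 5.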
class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        # no path was found; fall back to the start position only
        return [self.start.pos]
    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of successor nodes on free grid cells."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Retrace the path from the given node back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each search aims at the other search's current frontier node
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        # no path was found; fall back to the start position only
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()  # added: without this call the timer below measured only construction
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 688 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    r"""Wraps a BridgeTower image processor and a RoBERTa tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
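
# Illustrative sketch (added; not part of the original module): a typical round
# trip through the processor. The checkpoint name is one plausible example.
#
#   from transformers import BridgeTowerProcessor
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   batch = processor(images=image, text="a photo of a cat", return_tensors="pt")
#   # batch holds input_ids / attention_mask from the tokenizer plus
#   # pixel_values / pixel_mask from the image processor.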
| 688 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
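
# Example invocation (added; paths are placeholders and the script filename is
# an assumption about where this file lives):
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /path/to/output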
| 688 | 1 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
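
# Example invocation (added; the dump path is a placeholder and the script
# filename is an assumption about where this file lives):
#
#   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path /path/to/output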
| 688 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 688 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
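
# Example (added): floats_list((2, 3)) returns a 2x3 nested list of floats in
# [0, 1); passing scale=2.0 widens the range to [0, 2).
#
#   >>> sample = floats_list((2, 3), rng=random.Random(0))
#   >>> len(sample), len(sample[0])
#   (2, 3)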
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 688 |
'''simple docstring'''
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
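
# Illustrative sketch (added; not part of the original test): driving the
# scheduler outside the test harness. `model` and `latents` are stand-ins.
#
#   scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100, beta_schedule="linear")
#   scheduler.set_timesteps(10)
#   for t in scheduler.timesteps:
#       latents_in = scheduler.scale_model_input(latents, t)
#       noise_pred = model(latents_in, t)
#       latents = scheduler.step(noise_pred, t, latents).prev_sample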
| 688 | 1 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Checks whether `number` is prime in O(sqrt(n)) trial divisions."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
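
# Illustrative examples (added): next_prime scans from factor * value until it
# hits a prime, and recurses once if the starting point was already prime.
#
#   >>> is_prime(29)
#   True
#   >>> next_prime(14)   # 14, 15, 16, ... -> 17
#   17
#   >>> next_prime(13)   # 13 is already prime, so the next one up is returned
#   17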
| 688 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 688 | 1 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    r"""A pipeline for image super-resolution using latent diffusion."""

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
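
# Illustrative sketch (added; not part of the original module): the checkpoint
# name is one plausible example of a compatible repo.
#
#   from PIL import Image
#   from diffusers import LDMSuperResolutionPipeline
#
#   pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   low_res_img = Image.open("input.png").convert("RGB").resize((128, 128))
#   upscaled = pipeline(low_res_img, num_inference_steps=100, eta=1.0).images[0]
#   upscaled.save("upscaled.png")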
| 688 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def __a ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict=None ):
if config_path is not None:
a__ : Tuple = FlavaConfig.from_pretrained(lowerCAmelCase__ )
else:
a__ : Optional[int] = FlavaConfig()
a__ : List[Any] = FlavaForPreTraining(lowerCAmelCase__ ).eval()
a__ : Optional[int] = convert_dalle_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , save_checkpoint=lowerCAmelCase__ )
if os.path.exists(lowerCAmelCase__ ):
a__ : List[str] = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
else:
a__ : Dict = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location='''cpu''' )
a__ : List[Any] = upgrade_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
hf_model.load_state_dict(lowerCAmelCase__ )
a__ : Any = hf_model.state_dict()
a__ : Optional[Any] = count_parameters(lowerCAmelCase__ )
a__ : int = count_parameters(lowerCAmelCase__ ) + count_parameters(lowerCAmelCase__ )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 )
hf_model.save_pretrained(lowerCAmelCase__ )
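# Hedged illustration of the renaming pass above: upgrading a checkpoint is a
# chain of plain string substitutions over every key. The toy key below is an
# assumption for demonstration, not taken from a real FLAVA checkpoint.
def _demo_rename(key):
    key = key.replace('''image_encoder.module''' , '''flava.image_model''')
    key = key.replace('''text_encoder.module''' , '''flava.text_model''')
    return key

assert _demo_rename('''image_encoder.module.blocks.0.attn.weight''') == '''flava.image_model.blocks.0.attn.weight'''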
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 688 | 1 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
a__ : Optional[Any] = 0
a__ : List[Any] = [0]
a__ : Any = [0]
a__ : List[str] = len(A__ )
self.assertEqual(k.knapsack(A__ , A__ , A__ , A__ ) , 0 )
a__ : Dict = [6_0]
a__ : Union[str, Any] = [1_0]
a__ : Optional[int] = len(A__ )
self.assertEqual(k.knapsack(A__ , A__ , A__ , A__ ) , 0 )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
a__ : Dict = 3
a__ : Tuple = [1, 2, 3]
a__ : List[str] = [3, 2, 1]
a__ : Dict = len(A__ )
self.assertEqual(k.knapsack(A__ , A__ , A__ , A__ ) , 5 )
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
a__ : int = 5_0
a__ : List[str] = [6_0, 1_0_0, 1_2_0]
a__ : Union[str, Any] = [1_0, 2_0, 3_0]
a__ : int = len(A__ )
self.assertEqual(k.knapsack(A__ , A__ , A__ , A__ ) , 2_2_0 )
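# Hedged reference for the function under test. The signature is assumed to be
# (capacity, weights, values, counter); it is not recoverable from the mangled
# calls above, so treat this as an illustrative sketch only.
def _knapsack_reference(cap, w, val, c):
    # Classic 0/1 recursion: either skip item c - 1, or take it when it fits.
    if c == 0 or cap == 0:
        return 0
    if w[c - 1] > cap:
        return _knapsack_reference(cap, w, val, c - 1)
    return max(
        val[c - 1] + _knapsack_reference(cap - w[c - 1], w, val, c - 1),
        _knapsack_reference(cap, w, val, c - 1),
    )

assert _knapsack_reference(5_0, [1_0, 2_0, 3_0], [6_0, 1_0_0, 1_2_0], 3) == 2_2_0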
if __name__ == "__main__":
unittest.main()
| 688 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = 3
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
pass
def __a ( lowerCAmelCase__ : List[str] ):
for shard in shards:
for i in range(lowerCAmelCase__ ):
yield {"i": i, "shard": shard}
def __a ( ):
a__ : str = int(os.environ['''RANK'''] )
a__ : int = int(os.environ['''WORLD_SIZE'''] )
a__ : str = ArgumentParser()
parser.add_argument('''--streaming''' , type=lowerCAmelCase__ )
parser.add_argument('''--local_rank''' , type=lowerCAmelCase__ )
parser.add_argument('''--num_workers''' , type=lowerCAmelCase__ , default=0 )
a__ : int = parser.parse_args()
a__ : List[str] = args.streaming
a__ : Dict = args.num_workers
a__ : Dict = {'''shards''': [F'shard_{shard_idx}' for shard_idx in range(lowerCAmelCase__ )]}
a__ : Tuple = IterableDataset.from_generator(lowerCAmelCase__ , gen_kwargs=lowerCAmelCase__ )
if not streaming:
a__ : str = Dataset.from_list(list(lowerCAmelCase__ ) )
a__ : Optional[int] = split_dataset_by_node(lowerCAmelCase__ , rank=lowerCAmelCase__ , world_size=lowerCAmelCase__ )
a__ : Dict = torch.utils.data.DataLoader(lowerCAmelCase__ , num_workers=lowerCAmelCase__ )
a__ : str = NUM_SHARDS * NUM_ITEMS_PER_SHARD
a__ : Dict = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
a__ : str = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F'local_size {local_size} != expected_local_size {expected_local_size}' )
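# Arithmetic behind expected_local_size above: every rank gets
# full_size // world_size items, and the first full_size % world_size ranks
# get one extra. (12 = 4 shards * 3 items, matching the constants at the top.)
def _expected_sizes(full_size, world_size):
    return [full_size // world_size + int(r < full_size % world_size) for r in range(world_size)]

assert _expected_sizes(1_2, 2) == [6, 6]
assert _expected_sizes(1_2, 5) == [3, 3, 2, 2, 2]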
if __name__ == "__main__":
main()
| 688 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def __lowerCAmelCase ( A__ : ArgumentParser ) -> List[str]:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def __lowerCAmelCase ( self : str ) -> List[str]:
'''simple docstring'''
raise NotImplementedError()
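# Hedged sketch of a concrete subcommand matching the interface above. The
# method names register_subcommand / run are assumptions (they do not appear
# verbatim in this file), and the "env" subcommand is purely illustrative.
class _EnvCommand:
    @staticmethod
    def register_subcommand(subparsers):
        env_parser = subparsers.add_parser('''env''')
        env_parser.set_defaults(func=lambda args: _EnvCommand().run())

    def run(self):
        import platform

        print(F'python: {platform.python_version()}')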
| 688 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__SCREAMING_SNAKE_CASE = open # noqa: we just need to have a builtin inside this module to test it properly
| 688 | 1 |
'''simple docstring'''
def __a ( lowerCAmelCase__ : int | float | str ):
try:
a__ : Dict = float(lowerCAmelCase__ )
except ValueError:
raise ValueError('''Please enter a valid number''' )
a__ : Optional[int] = decimal - int(lowerCAmelCase__ )
if fractional_part == 0:
return int(lowerCAmelCase__ ), 1
else:
a__ : str = len(str(lowerCAmelCase__ ).split('''.''' )[1] )
a__ : Dict = int(decimal * (10**number_of_frac_digits) )
a__ : Optional[Any] = 10**number_of_frac_digits
a__ , a__ : Optional[Any] = denominator, numerator
while True:
a__ : Dict = dividend % divisor
if remainder == 0:
break
a__ , a__ : Tuple = divisor, remainder
a__ , a__ : List[str] = numerator / divisor, denominator / divisor
return int(lowerCAmelCase__ ), int(lowerCAmelCase__ )
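# Hedged cross-check via the standard library: fractions.Fraction performs the
# same decimal-to-ratio conversion (with automatic reduction) when built from
# the string form of the number.
from fractions import Fraction

assert (Fraction('''1.5''').numerator, Fraction('''1.5''').denominator) == (3, 2)
assert (Fraction('''6.25''').numerator, Fraction('''6.25''').denominator) == (2_5, 4)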
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
| 688 |
'''simple docstring'''
import enum
import shutil
import sys
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = shutil.get_terminal_size()
__SCREAMING_SNAKE_CASE = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class lowerCAmelCase__ ( enum.Enum ):
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = 1
def __a ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict="" ):
sys.stdout.write(str(lowerCAmelCase__ ) + end )
sys.stdout.flush()
def __a ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : int="" ):
forceWrite(F'\u001b[{color}m{content}\u001b[0m' , lowerCAmelCase__ )
def __a ( ):
forceWrite('''\r''' )
def __a ( lowerCAmelCase__ : int , lowerCAmelCase__ : str ):
forceWrite(F'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def __a ( ):
forceWrite(''' ''' * TERMINAL_WIDTH )
reset_cursor()
def __a ( ):
reset_cursor()
forceWrite('''-''' * TERMINAL_WIDTH )
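# Minimal standalone demo of the ANSI escape pattern used above; 3_2 is the
# standard escape code for green text.
def _demo_color(content, color=3_2):
    sys.stdout.write(F'\u001b[{color}m{content}\u001b[0m\n')
    sys.stdout.flush()

if __name__ == "__main__":
    _demo_color('''colored output demo''')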
| 688 | 1 |
'''simple docstring'''
import inspect
import unittest
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def __lowerCAmelCase ( self : int ) -> str:
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
a__ : Optional[int] = inspect.getmembers(A__ , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
a__ : int = '''k-diffusion'''
elif backend == "invisible_watermark":
a__ : int = '''invisible-watermark'''
assert backend in deps, F'{backend} is not in the deps table!'
| 688 |
'''simple docstring'''
import inspect
import unittest
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def __lowerCAmelCase ( self : int ) -> str:
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
a__ : Optional[int] = inspect.getmembers(A__ , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
a__ : int = '''k-diffusion'''
elif backend == "invisible_watermark":
a__ : int = '''invisible-watermark'''
assert backend in deps, F'{backend} is not in the deps table!'
| 688 | 1 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def __a ( lowerCAmelCase__ : Optional[int] ):
a__ : Any = np.max(lowerCAmelCase__ , axis=-1 , keepdims=lowerCAmelCase__ )
a__ : Dict = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowerCAmelCase__ )
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
def __lowerCAmelCase ( self : Optional[Any] , **A__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
a__ : Dict = {}
if "second_text" in kwargs:
a__ : str = kwargs['''second_text''']
return preprocess_kwargs, {}, {}
def __lowerCAmelCase ( self : Any , A__ : int , A__ : int=None ) -> str:
'''simple docstring'''
return self.tokenizer(A__ , text_pair=A__ , return_tensors=self.framework )
def __lowerCAmelCase ( self : Dict , A__ : str ) -> Any:
'''simple docstring'''
return self.model(**A__ )
def __lowerCAmelCase ( self : int , A__ : Any ) -> Union[str, Any]:
'''simple docstring'''
a__ : Any = model_outputs.logits[0].numpy()
a__ : List[Any] = softmax(A__ )
a__ : Dict = np.argmax(A__ )
a__ : Any = self.model.config.idalabel[best_class]
a__ : str = probabilities[best_class].item()
a__ : Any = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 688 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __a ( lowerCAmelCase__ : Dict ):
a__ , a__ : int = image.size
a__ , a__ : List[str] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
a__ : Tuple = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
a__ : List[Any] = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 255.0
a__ : Any = image[None].transpose(0 , 3 , 1 , 2 )
a__ : Dict = torch.from_numpy(lowerCAmelCase__ )
return 2.0 * image - 1.0
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , A__ : VQModel , A__ : UNetaDModel , A__ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=A__ , unet=A__ , scheduler=A__ )
@torch.no_grad()
def __call__( self : List[str] , A__ : Union[torch.Tensor, PIL.Image.Image] = None , A__ : Optional[int] = 1 , A__ : Optional[int] = 1_0_0 , A__ : Optional[float] = 0.0 , A__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A__ : Optional[str] = "pil" , A__ : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
if isinstance(A__ , PIL.Image.Image ):
a__ : List[Any] = 1
elif isinstance(A__ , torch.Tensor ):
a__ : List[str] = image.shape[0]
else:
raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(A__ )}' )
if isinstance(A__ , PIL.Image.Image ):
a__ : Union[str, Any] = preprocess(A__ )
a__ , a__ : Dict = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
a__ : Optional[int] = (batch_size, self.unet.config.in_channels // 2, height, width)
a__ : Optional[int] = next(self.unet.parameters() ).dtype
a__ : List[str] = randn_tensor(A__ , generator=A__ , device=self.device , dtype=A__ )
a__ : Any = image.to(device=self.device , dtype=A__ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(A__ , device=self.device )
a__ : int = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
a__ : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
a__ : Union[str, Any] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
a__ : str = {}
if accepts_eta:
a__ : Dict = eta
for t in self.progress_bar(A__ ):
# concat latents and low resolution image in the channel dimension.
a__ : str = torch.cat([latents, image] , dim=1 )
a__ : Optional[Any] = self.scheduler.scale_model_input(A__ , A__ )
# predict the noise residual
a__ : Union[str, Any] = self.unet(A__ , A__ ).sample
# compute the previous noisy sample x_t -> x_t-1
a__ : Union[str, Any] = self.scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
# decode the image latents with the VQVAE
a__ : List[Any] = self.vqvae.decode(A__ ).sample
a__ : List[Any] = torch.clamp(A__ , -1.0 , 1.0 )
a__ : Optional[Any] = image / 2 + 0.5
a__ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
a__ : Union[str, Any] = self.numpy_to_pil(A__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A__ )
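# Hedged usage sketch (comment only): the arguments follow the positional
# signature of __call__ above — image, batch_size, num_inference_steps, eta.
# The checkpoint id is an assumption, not confirmed by this file.
#
#   import PIL.Image
#   pipe = lowerCAmelCase__.from_pretrained('''CompVis/ldm-super-resolution-4x-openimages''')
#   low_res = PIL.Image.open('''low_res.png''').convert('''RGB''')
#   out = pipe(low_res, 1, 1_0_0, 1.0)
#   out.images[0].save('''upscaled.png''')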
| 688 | 1 |
'''simple docstring'''
def __a ( lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float , ):
a__ : List[str] = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
        raise ValueError('''All input parameters must be non-negative''' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('''Relative densities cannot be greater than one''' )
else:
a__ : Any = 1 - (matter_density + radiation_density + dark_energy)
a__ : int = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
a__ : Optional[Any] = hubble_constant * e_a ** (1 / 2)
return hubble
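# For reference, the quantity computed above is the Friedmann expansion rate:
#
#   E(z)^2 = Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_Lambda
#   H(z)   = H0 * E(z)
#
# At z = 0 the four densities sum to one by construction, so E(0) = 1 and
# H(0) = H0 — a quick sanity check on the formula:
assert abs((1E-4 + 0.3 + 0.0 + (1 - 1E-4 - 0.3)) ** 0.5 - 1.0) < 1E-12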
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
__SCREAMING_SNAKE_CASE = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 688 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
__SCREAMING_SNAKE_CASE = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def __a ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : str=8 ):
a__ : Tuple = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
a__ : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
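# Worked example of the rounding above: a 768x768 request with the movq scale
# factor of 8 maps to a 96x96 latent grid (768 // 64 = 12, then 12 * 8 = 96).
assert (7_6_8 // 8**2) * 8 == 9_6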
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , A__ : UNetaDConditionModel , A__ : DDPMScheduler , A__ : VQModel , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
unet=A__ , scheduler=A__ , movq=A__ , )
a__ : Union[str, Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCAmelCase ( self : Optional[Any] , A__ : List[Any] , A__ : List[str] , A__ : Optional[Any] , A__ : Dict , A__ : Dict , A__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
if latents is None:
a__ : List[str] = randn_tensor(A__ , generator=A__ , device=A__ , dtype=A__ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
a__ : int = latents.to(A__ )
a__ : Tuple = latents * scheduler.init_noise_sigma
return latents
def __lowerCAmelCase ( self : Union[str, Any] , A__ : int=0 ) -> str:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
a__ : Union[str, Any] = torch.device(F'cuda:{gpu_id}' )
a__ : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A__ , A__ )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : Tuple=0 ) -> Dict:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
a__ : int = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=A__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
a__ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
a__ , a__ : List[str] = cpu_offload_with_hook(A__ , A__ , prev_module_hook=A__ )
# We'll offload the last model manually.
a__ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(A__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A__ )
def __call__( self : Any , A__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A__ : torch.FloatTensor , A__ : int = 5_1_2 , A__ : int = 5_1_2 , A__ : int = 1_0_0 , A__ : float = 4.0 , A__ : int = 1 , A__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A__ : Optional[torch.FloatTensor] = None , A__ : Optional[str] = "pil" , A__ : bool = True , ) -> str:
'''simple docstring'''
a__ : Optional[Any] = self._execution_device
a__ : List[str] = guidance_scale > 1.0
if isinstance(A__ , A__ ):
a__ : int = torch.cat(A__ , dim=0 )
if isinstance(A__ , A__ ):
a__ : Optional[int] = torch.cat(A__ , dim=0 )
if isinstance(A__ , A__ ):
a__ : int = torch.cat(A__ , dim=0 )
a__ : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
a__ : Tuple = image_embeds.repeat_interleave(A__ , dim=0 )
a__ : Optional[int] = negative_image_embeds.repeat_interleave(A__ , dim=0 )
a__ : Optional[int] = hint.repeat_interleave(A__ , dim=0 )
a__ : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A__ )
a__ : Tuple = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=A__ )
self.scheduler.set_timesteps(A__ , device=A__ )
a__ : int = self.scheduler.timesteps
a__ : str = self.movq.config.latent_channels
a__ , a__ : Optional[int] = downscale_height_and_width(A__ , A__ , self.movq_scale_factor )
# create initial latent
a__ : List[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A__ , A__ , A__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A__ ) ):
# expand the latents if we are doing classifier free guidance
a__ : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a__ : List[str] = {'''image_embeds''': image_embeds, '''hint''': hint}
a__ : Union[str, Any] = self.unet(
sample=A__ , timestep=A__ , encoder_hidden_states=A__ , added_cond_kwargs=A__ , return_dict=A__ , )[0]
if do_classifier_free_guidance:
a__ , a__ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
a__ , a__ : Dict = noise_pred.chunk(2 )
a__ , a__ : Optional[Any] = variance_pred.chunk(2 )
a__ : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
a__ : Union[str, Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
a__ , a__ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
a__ : Union[str, Any] = self.scheduler.step(
A__ , A__ , A__ , generator=A__ , )[0]
# post-processing
a__ : Tuple = self.movq.decode(A__ , force_not_quantize=A__ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}' )
if output_type in ["np", "pil"]:
a__ : Union[str, Any] = image * 0.5 + 0.5
a__ : str = image.clamp(0 , 1 )
a__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
a__ : int = self.numpy_to_pil(A__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A__ )
| 688 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def __a ( lowerCAmelCase__ : Callable[[int | float], int | float] , lowerCAmelCase__ : int | float , lowerCAmelCase__ : int | float , lowerCAmelCase__ : int = 100 , ):
a__ : Tuple = x_start
a__ : str = fnc(lowerCAmelCase__ )
a__ : Tuple = 0.0
for _ in range(lowerCAmelCase__ ):
# Approximates curve as a sequence of linear lines and sums their length
a__ : str = (x_end - x_start) / steps + xa
a__ : Tuple = fnc(lowerCAmelCase__ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
a__ : Union[str, Any] = xa
a__ : List[str] = fxa
return length
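# The loop above is the standard polyline (chord) approximation of arc length:
#
#   L ~= sum_i sqrt( (dx)^2 + (f(x_i + dx) - f(x_i))^2 ),  dx = (x_end - x_start) / steps
#
# Hand check: for f(x) = x on [0, 1] every chord has length dx * sqrt(2), so
# the sum is exactly sqrt(2) ~= 1.41421 for any number of steps.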
if __name__ == "__main__":
def __a ( lowerCAmelCase__ : List[str] ):
return math.sin(10 * x )
print('f(x) = sin(10 * x)')
print('The length of the curve from x = -10 to x = 10 is:')
__SCREAMING_SNAKE_CASE = 1_0
while i <= 1_0_0_0_0_0:
print(f'With {i} steps: {line_length(f, -1_0, 1_0, i)}')
i *= 1_0
| 688 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.txt'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
__SCREAMING_SNAKE_CASE = {
'facebook/esm2_t6_8M_UR50D': 1_0_2_4,
'facebook/esm2_t12_35M_UR50D': 1_0_2_4,
}
def __a ( lowerCAmelCase__ : Union[str, Any] ):
with open(lowerCAmelCase__ , '''r''' ) as f:
a__ : Optional[int] = f.read().splitlines()
return [l.strip() for l in lines]
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self : List[str] , A__ : int , A__ : Union[str, Any]="<unk>" , A__ : Tuple="<cls>" , A__ : List[Any]="<pad>" , A__ : Optional[int]="<mask>" , A__ : List[Any]="<eos>" , **A__ : Optional[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**A__ )
a__ : Union[str, Any] = load_vocab_file(A__ )
a__ : int = dict(enumerate(self.all_tokens ) )
a__ : str = {tok: ind for ind, tok in enumerate(self.all_tokens )}
a__ : List[Any] = unk_token
a__ : Any = cls_token
a__ : Any = pad_token
a__ : Any = mask_token
a__ : Any = eos_token
a__ : int = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __lowerCAmelCase ( self : Any , A__ : int ) -> str:
'''simple docstring'''
return self._id_to_token.get(A__ , self.unk_token )
def __lowerCAmelCase ( self : Optional[Any] , A__ : str ) -> int:
'''simple docstring'''
return self._token_to_id.get(A__ , self._token_to_id.get(self.unk_token ) )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : Tuple , **A__ : str ) -> List[Any]:
'''simple docstring'''
return text.split()
def __lowerCAmelCase ( self : Union[str, Any] , A__ : Optional[int]=False ) -> Tuple:
'''simple docstring'''
return len(self._id_to_token )
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def __lowerCAmelCase ( self : Any , A__ : str ) -> int:
'''simple docstring'''
return self._token_to_id.get(A__ , self._token_to_id.get(self.unk_token ) )
def __lowerCAmelCase ( self : List[Any] , A__ : int ) -> str:
'''simple docstring'''
return self._id_to_token.get(A__ , self.unk_token )
def __lowerCAmelCase ( self : str , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : Tuple = [self.cls_token_id]
a__ : Union[str, Any] = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __lowerCAmelCase ( self : Tuple , A__ : List , A__ : Optional[List] = None , A__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
a__ : Any = [1] + ([0] * len(A__ )) + [1]
if token_ids_a is not None:
mask += [0] * len(A__ ) + [1]
return mask
def __lowerCAmelCase ( self : Any , A__ : Dict , A__ : Dict ) -> List[Any]:
'''simple docstring'''
a__ : Union[str, Any] = os.path.join(A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(A__ , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __lowerCAmelCase ( self : Any ) -> int:
'''simple docstring'''
return self.get_vocab_size(with_added_tokens=A__ )
def __lowerCAmelCase ( self : List[str] , A__ : Union[List[str], List[AddedToken]] , A__ : bool = False ) -> int:
'''simple docstring'''
return super()._add_tokens(A__ , special_tokens=A__ )
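# Hedged usage sketch (comment only; the checkpoint id comes from the vocab
# map above, but the class handle EsmTokenizer is an assumption): ESM treats
# each amino-acid letter as one token and wraps the sequence in <cls> ... <eos>.
#
#   tokenizer = EsmTokenizer.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
#   enc = tokenizer('''MKTAYIAKQR''')
#   assert len(enc['''input_ids''']) == len('''MKTAYIAKQR''') + 2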
| 688 | 1 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __a ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple ):
a__ : Tuple = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
a__ : Dict = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
a__ : Dict = F'{src_lang}-{tgt_lang}'
a__ : int = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
a__ : Any = os.path.join(lowerCAmelCase__ , '''README.md''' )
print(F'Generating {path}' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(lowerCAmelCase__ )
# make sure we are under the root of the project
__SCREAMING_SNAKE_CASE = Path(__file__).resolve().parent.parent.parent
__SCREAMING_SNAKE_CASE = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_name.split('-')
__SCREAMING_SNAKE_CASE = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 688 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
__SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : str ) -> Dict:
'''simple docstring'''
a__ : List[str] = False
def __lowerCAmelCase ( self : Tuple , A__ : Optional[int] , A__ : Optional[Any] , A__ : List[str] , A__ : Tuple ) -> Optional[int]:
'''simple docstring'''
if not self.initialized:
a__ : Optional[Any] = RagRetriever(
A__ , question_encoder_tokenizer=A__ , generator_tokenizer=A__ , index=A__ , init_retrieval=A__ , )
a__ : Union[str, Any] = True
def __lowerCAmelCase ( self : Tuple ) -> Tuple:
'''simple docstring'''
self.retriever.index.init_index()
def __lowerCAmelCase ( self : List[Any] , A__ : List[Any] , A__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
a__ , a__ : Optional[Any] = self.retriever._main_retrieve(A__ , A__ )
return doc_ids, retrieved_doc_embeds
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : str , A__ : Optional[int] , A__ : List[Any] , A__ : List[Any] , A__ : str , A__ : Any=None ) -> Optional[Any]:
'''simple docstring'''
if index is not None and index.is_initialized() and len(A__ ) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )
super().__init__(
A__ , question_encoder_tokenizer=A__ , generator_tokenizer=A__ , index=A__ , init_retrieval=A__ , )
a__ : List[str] = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(A__ , A__ , A__ , A__ )
for worker in self.retrieval_workers
] )
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
logger.info('''initializing retrieval''' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __lowerCAmelCase ( self : Optional[int] , A__ : Optional[int] , A__ : int ) -> Dict:
'''simple docstring'''
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
a__ : List[Any] = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
a__ , a__ : Tuple = ray.get(random_worker.retrieve.remote(A__ , A__ ) )
else:
a__ , a__ : int = self._main_retrieve(A__ , A__ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A__ )
@classmethod
def __lowerCAmelCase ( cls : int , A__ : Optional[Any] , A__ : Any=None , **A__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return super(A__ , cls ).get_tokenizers(A__ , A__ , **A__ )
@classmethod
def __lowerCAmelCase ( cls : int , A__ : Optional[int] , A__ : Union[str, Any] , A__ : Union[str, Any]=None , **A__ : Dict ) -> List[Any]:
'''simple docstring'''
a__ : Dict = kwargs.pop('''config''' , A__ ) or RagConfig.from_pretrained(A__ , **A__ )
a__ : Dict = RagTokenizer.from_pretrained(A__ , config=A__ )
a__ : str = rag_tokenizer.question_encoder
a__ : List[str] = rag_tokenizer.generator
if indexed_dataset is not None:
a__ : List[Any] = '''custom'''
a__ : List[Any] = CustomHFIndex(config.retrieval_vector_size , A__ )
else:
a__ : Optional[Any] = cls._build_index(A__ )
return cls(
A__ , question_encoder_tokenizer=A__ , generator_tokenizer=A__ , retrieval_workers=A__ , index=A__ , )
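# Hedged setup sketch (comment only; the class names RayRetriever and
# RagRayDistributedRetriever are assumptions that do not appear verbatim
# above, and my_dataset is a placeholder). The first class plays the role of a
# per-process Ray actor; the second fans retrieval out to those actors.
#
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(4)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       '''facebook/rag-token-nq''', workers, indexed_dataset=my_dataset
#   )
#   retriever.init_retrieval()  # loads the index once per actor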
| 688 | 1 |
'''simple docstring'''
def __a ( lowerCAmelCase__ : float , lowerCAmelCase__ : int ):
if digit_amount > 0:
return round(number - int(lowerCAmelCase__ ) , lowerCAmelCase__ )
return number - int(lowerCAmelCase__ )
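# Hedged equivalent for the digit_amount == 0 branch above: math.modf performs
# the same split into fractional and integral parts, with matching signs.
import math

assert math.modf(35.345)[0] == 35.345 - int(35.345)
assert math.modf(-14.789)[0] == -14.789 - int(-14.789)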
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 688 |
'''simple docstring'''
def __a ( lowerCAmelCase__ : list , lowerCAmelCase__ : list , lowerCAmelCase__ : int ):
a__ : List[str] = len(lowerCAmelCase__ )
a__ : int = [[0] * n for i in range(lowerCAmelCase__ )]
for i in range(lowerCAmelCase__ ):
a__ : Dict = y_points[i]
for i in range(2 , lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ , lowerCAmelCase__ ):
a__ : Any = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
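# Hedged reference version of the routine above (Neville's iterated
# interpolation; identifier names are assumed, and the table is indexed from
# column 0 rather than column 1):
def _neville_reference(x_points, y_points, xa):
    n = len(x_points)
    q = [[0.0] * n for _ in range(n)]
    for i in range(n):
        q[i][0] = y_points[i]  # column 0 holds the raw samples
    for i in range(1, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i])
    return q[n - 1][n - 1]

# Interpolating y = x**2 through three points recovers it exactly at xa = 5.
assert _neville_reference([1, 2, 3], [1, 4, 9], 5) == 25.0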
if __name__ == "__main__":
import doctest
doctest.testmod()
| 688 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# TODO: upload to AWS
__SCREAMING_SNAKE_CASE = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = "retribert"
def __init__( self : List[str] , A__ : Dict=3_0_5_2_2 , A__ : Any=7_6_8 , A__ : Union[str, Any]=8 , A__ : Dict=1_2 , A__ : Union[str, Any]=3_0_7_2 , A__ : str="gelu" , A__ : List[Any]=0.1 , A__ : Union[str, Any]=0.1 , A__ : Tuple=5_1_2 , A__ : Tuple=2 , A__ : Optional[Any]=0.02 , A__ : int=1E-12 , A__ : Dict=True , A__ : List[str]=1_2_8 , A__ : Tuple=0 , **A__ : Union[str, Any] , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=A__ , **A__ )
a__ : Optional[int] = vocab_size
a__ : Tuple = hidden_size
a__ : List[str] = num_hidden_layers
a__ : Tuple = num_attention_heads
a__ : Dict = hidden_act
a__ : str = intermediate_size
a__ : Optional[Any] = hidden_dropout_prob
a__ : Union[str, Any] = attention_probs_dropout_prob
a__ : Tuple = max_position_embeddings
a__ : Dict = type_vocab_size
a__ : Tuple = initializer_range
a__ : str = layer_norm_eps
a__ : Any = share_encoders
a__ : str = projection_dim
| 688 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = "swin2sr"
__UpperCamelCase = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , A__ : int=6_4 , A__ : List[Any]=1 , A__ : List[Any]=3 , A__ : Any=1_8_0 , A__ : Optional[int]=[6, 6, 6, 6, 6, 6] , A__ : Optional[int]=[6, 6, 6, 6, 6, 6] , A__ : Dict=8 , A__ : Any=2.0 , A__ : Optional[int]=True , A__ : Union[str, Any]=0.0 , A__ : Union[str, Any]=0.0 , A__ : List[str]=0.1 , A__ : Any="gelu" , A__ : Tuple=False , A__ : Optional[int]=0.02 , A__ : List[Any]=1E-5 , A__ : Any=2 , A__ : Union[str, Any]=1.0 , A__ : Dict="1conv" , A__ : Optional[Any]="pixelshuffle" , **A__ : Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**A__ )
a__ : List[str] = image_size
a__ : Optional[Any] = patch_size
a__ : Dict = num_channels
a__ : Optional[int] = embed_dim
a__ : int = depths
a__ : Optional[int] = len(A__ )
a__ : Dict = num_heads
a__ : List[Any] = window_size
a__ : Optional[int] = mlp_ratio
a__ : Optional[int] = qkv_bias
a__ : Union[str, Any] = hidden_dropout_prob
a__ : Dict = attention_probs_dropout_prob
a__ : Union[str, Any] = drop_path_rate
a__ : int = hidden_act
a__ : int = use_absolute_embeddings
a__ : Dict = layer_norm_eps
a__ : List[str] = initializer_range
a__ : List[Any] = upscale
a__ : List[Any] = img_range
a__ : Optional[int] = resi_connection
a__ : int = upsampler
| 688 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : int , A__ : Optional[Any] , A__ : Optional[int]=1_3 , A__ : int=7 , A__ : int=6 , A__ : List[Any]=1_7 , A__ : Tuple=2_3 , A__ : int=1_1 , A__ : Union[str, Any]=True , ) -> Tuple:
'''simple docstring'''
a__ : List[str] = parent
a__ : Dict = batch_size
a__ : List[Any] = seq_length
a__ : Any = act_dim
a__ : Dict = state_dim
a__ : Any = hidden_size
a__ : List[Any] = max_length
a__ : Optional[Any] = is_training
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
a__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
a__ : Any = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
a__ : Tuple = floats_tensor((self.batch_size, self.seq_length, 1) )
a__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) )
a__ : Optional[Any] = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 )
a__ : List[str] = random_attention_mask((self.batch_size, self.seq_length) )
a__ : Any = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __lowerCAmelCase ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : Tuple , A__ : Optional[int] , A__ : Tuple , A__ : List[str] , A__ : str , A__ : List[str] , A__ : Any , ) -> str:
'''simple docstring'''
a__ : Dict = DecisionTransformerModel(config=A__ )
model.to(A__ )
model.eval()
a__ : List[str] = model(A__ , A__ , A__ , A__ , A__ , A__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3, as there are 3 modalities: states, returns and actions
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
a__ : Any = self.prepare_config_and_inputs()
        a__ , a__ , a__ , a__ , a__ , a__ , a__ : List[Any] = config_and_inputs
a__ : Tuple = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (DecisionTransformerModel,) if is_torch_available() else ()
__UpperCamelCase = ()
__UpperCamelCase = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
    # Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
__UpperCamelCase = False
    # Ignore failing tests from ModelTesterMixin, as the model does not implement these features
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
a__ : Tuple = DecisionTransformerModelTester(self )
a__ : List[str] = ConfigTester(self , config_class=A__ , hidden_size=3_7 )
def __lowerCAmelCase ( self : int ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
a__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
@slow
def __lowerCAmelCase ( self : str ) -> int:
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Tuple = DecisionTransformerModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
def __lowerCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
a__ , a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : List[Any] = model_class(A__ )
a__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Optional[int] = [*signature.parameters.keys()]
a__ : Tuple = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(A__ )] , A__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
a__ : Any = 2 # number of steps of autoregressive prediction we will perform
a__ : List[Any] = 1_0 # defined by the RL environment, may be normalized
a__ : Any = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
a__ : Dict = model.to(A__ )
a__ : Tuple = model.config
torch.manual_seed(0 )
a__ : Union[str, Any] = torch.randn(1 , 1 , config.state_dim ).to(device=A__ , dtype=torch.floataa ) # env.reset()
a__ : Dict = torch.tensor(
[[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=A__ )
a__ : Tuple = torch.tensor(A__ , device=A__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
a__ : Dict = state
a__ : List[str] = torch.zeros(1 , 0 , config.act_dim , device=A__ , dtype=torch.floataa )
a__ : str = torch.zeros(1 , 0 , device=A__ , dtype=torch.floataa )
a__ : List[Any] = torch.tensor(0 , device=A__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(A__ ):
a__ : List[str] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=A__ )] , dim=1 )
a__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=A__ )] , dim=1 )
a__ : int = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
a__ , a__ , a__ : Optional[Any] = model(
states=A__ , actions=A__ , rewards=A__ , returns_to_go=A__ , timesteps=A__ , attention_mask=A__ , return_dict=A__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
a__ , a__ , a__ , a__ : int = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=A__ , dtype=torch.floataa ),
1.0,
False,
{},
)
a__ : int = action_pred[0, -1]
a__ : Optional[Any] = torch.cat([states, state] , dim=1 )
a__ : Optional[Any] = returns_to_go[0, -1] - reward
a__ : Optional[int] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
a__ : Union[str, Any] = torch.cat(
[timesteps, torch.ones((1, 1) , device=A__ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 688 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
a__ : int = 0
def __lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : List[Any] = Path(A__ ) / '''preprocessor_config.json'''
a__ : List[Any] = Path(A__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Any = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : int = Path(A__ ) / '''preprocessor_config.json'''
a__ : Optional[Any] = Path(A__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Dict = CLIPConfig()
            # Create a dummy config file with image_processor_type
a__ : int = Path(A__ ) / '''preprocessor_config.json'''
a__ : Optional[int] = Path(A__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
a__ : List[Any] = AutoImageProcessor.from_pretrained(A__ ).to_dict()
config_dict.pop('''image_processor_type''' )
a__ : Union[str, Any] = CLIPImageProcessor(**A__ )
# save in new folder
model_config.save_pretrained(A__ )
config.save_pretrained(A__ )
a__ : Union[str, Any] = AutoImageProcessor.from_pretrained(A__ )
# make sure private variable is not incorrectly saved
a__ : Optional[Any] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Optional[int] = Path(A__ ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
a__ : Any = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
A__ , '''clip-base is not a local folder and is not a valid model identifier''' ):
a__ : str = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
'''simple docstring'''
with self.assertRaisesRegex(
A__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ , revision='''aaaaaa''' )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
A__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
a__ : Union[str, Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
with self.assertRaises(A__ ):
a__ : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A__ ):
a__ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
a__ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A__ )
a__ : str = AutoImageProcessor.from_pretrained(A__ , trust_remote_code=A__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , A__ )
AutoImageProcessor.register(A__ , A__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A__ ):
AutoImageProcessor.register(A__ , A__ )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Optional[int] = Path(A__ ) / '''preprocessor_config.json'''
a__ : List[str] = Path(A__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Tuple = CustomImageProcessor.from_pretrained(A__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A__ )
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
'''simple docstring'''
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = True
try:
AutoConfig.register('''custom''' , A__ )
AutoImageProcessor.register(A__ , A__ )
# If remote code is not set, the default is to use local
a__ : Dict = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
a__ : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
a__ : Optional[int] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(A__ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
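# Hedged usage sketch (not part of the test above): the register-then-load round trip the
# test exercises, written as plain code. `CustomConfig` / `CustomImageProcessor` are the
# stand-in classes defined in this test module, and the path is a placeholder.
#
#   AutoConfig.register('custom', CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   image_processor = AutoImageProcessor.from_pretrained('/path/to/saved/processor')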
| 688 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('''test''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''' )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command( args ):
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F'--config_file={args.config_file} {script_name}'
    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''' )
def main( ):
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
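# Hedged usage sketch: the subprocess assembled above corresponds to running, from a shell,
#   accelerate test --config_file /path/to/default_config.yaml
# which launches `accelerate-launch .../test_utils/scripts/test_script.py`. The config path
# is a placeholder; omitting --config_file runs the bundled test script with its defaults.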
| 688 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__SCREAMING_SNAKE_CASE = get_logger(__name__)
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCamelCase = "dummy_data"
__UpperCamelCase = "datasets"
__UpperCamelCase = False
def __init__( self : Any , A__ : str , A__ : str , A__ : Union[Version, str] , A__ : Optional[str] = None , A__ : bool = False , A__ : bool = True , A__ : Optional[List[Callable]] = None , ) -> int:
'''simple docstring'''
a__ : Tuple = 0
a__ : Any = dataset_name
a__ : int = cache_dir
a__ : str = use_local_dummy_data
a__ : List[str] = config
# download_callbacks take a single url as input
a__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
a__ : str = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
a__ : Optional[Any] = str(A__ )
# to be downloaded
a__ : Tuple = None
a__ : Tuple = None
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if self._dummy_file is None:
a__ : Dict = self.download_dummy_data()
return self._dummy_file
@property
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
a__ : int = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
a__ : str = cached_path(
A__ , cache_dir=self.cache_dir , extract_compressed_file=A__ , force_extract=A__ )
return os.path.join(A__ , self.dummy_file_name )
@property
def __lowerCAmelCase ( self : int ) -> Optional[int]:
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self._bucket_url is None:
a__ : int = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : Optional[int] , *A__ : int ) -> Union[str, Any]:
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
a__ : Tuple = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
a__ : Union[str, Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(A__ , A__ ):
return self.create_dummy_data_dict(A__ , A__ )
elif isinstance(A__ , (list, tuple) ):
return self.create_dummy_data_list(A__ , A__ )
else:
return self.create_dummy_data_single(A__ , A__ )
def __lowerCAmelCase ( self : List[str] , A__ : Any , *A__ : int ) -> Any:
'''simple docstring'''
return self.download_and_extract(A__ )
def __lowerCAmelCase ( self : Any , A__ : Optional[int] , A__ : Optional[Any] ) -> int:
'''simple docstring'''
return self.download_and_extract(A__ )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : int , *A__ : List[Any] , **A__ : str ) -> Optional[Any]:
'''simple docstring'''
return path
def __lowerCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
return {}
def __lowerCAmelCase ( self : int , A__ : Union[str, Any] , A__ : List[str] ) -> Any:
'''simple docstring'''
a__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(A__ , A__ ):
for single_url in single_urls:
download_callback(A__ )
else:
a__ : Dict = single_urls
download_callback(A__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(A__ , A__ ):
a__ : Optional[int] = [os.path.join(A__ , urllib.parse.quote_plus(Path(A__ ).name ) ) for x in single_urls]
else:
a__ : Optional[Any] = single_urls
a__ : Tuple = os.path.join(A__ , urllib.parse.quote_plus(Path(A__ ).name ) )
a__ : List[str] = value
# make sure that values are unique
if all(isinstance(A__ , A__ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
a__ : Optional[int] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __lowerCAmelCase ( self : Dict , A__ : str , A__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
a__ : str = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
a__ : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , A__ ) ) for url in data_url )
a__ : Optional[Any] = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
a__ : Dict = [data_url[0]] * len(A__ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(A__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
a__ : Optional[int] = os.path.join(A__ , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(A__ )
return dummy_data_list
def __lowerCAmelCase ( self : Dict , A__ : Dict , A__ : str ) -> Optional[int]:
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(A__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
a__ : Union[str, Any] = os.path.join(A__ , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(A__ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __lowerCAmelCase ( self : int ) -> str:
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Any , A__ : Tuple ) -> Any:
'''simple docstring'''
def _iter_archive_members(A__ : str ):
# this preserves the order of the members inside the ZIP archive
a__ : Dict = Path(self.dummy_file ).parent
a__ : Tuple = path.relative_to(A__ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
a__ : Optional[Any] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(A__ )
a__ : str = Path(A__ )
a__ : Optional[Any] = _iter_archive_members(A__ ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(A__ ).as_posix(), file_path.open('''rb''' )
def __lowerCAmelCase ( self : Tuple , A__ : Tuple ) -> Tuple:
'''simple docstring'''
if not isinstance(A__ , A__ ):
a__ : int = [paths]
for path in paths:
if os.path.isfile(A__ ):
if os.path.basename(A__ ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(A__ ):
if os.path.basename(A__ ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(A__ ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(A__ , A__ )
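# Hedged usage sketch (illustrative class name, argument order, and URL): the mock manager
# resolves URLs to paths inside the extracted dummy_data.zip instead of downloading anything.
#
#   dummy_dl_manager = MockDownloadManager('''squad''' , None , '''1.0.0''' )
#   paths = dummy_dl_manager.download_and_extract({'''train''': '''https://example.com/train.json'''} )
#   # paths['train'] points at <dummy_file>/train.json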
| 688 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = LayoutLMTokenizer
__UpperCamelCase = LayoutLMTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def __lowerCAmelCase ( self : Any ) -> Any:
'''simple docstring'''
super().setUp()
a__ : Optional[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self : Union[str, Any] , **A__ : Any ) -> Tuple:
'''simple docstring'''
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A__ )
def __lowerCAmelCase ( self : int , A__ : int ) -> str:
'''simple docstring'''
a__ : List[str] = '''UNwant\u00E9d,running'''
a__ : str = '''unwanted, running'''
return input_text, output_text
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
a__ : Tuple = self.tokenizer_class(self.vocab_file )
a__ : List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [7, 4, 5, 1_0, 8, 9] )
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
| 688 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = LxmertTokenizer
__UpperCamelCase = LxmertTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def __lowerCAmelCase ( self : str ) -> str:
'''simple docstring'''
super().setUp()
a__ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self : int , A__ : int ) -> int:
'''simple docstring'''
a__ : List[Any] = '''UNwant\u00E9d,running'''
a__ : Optional[int] = '''unwanted, running'''
return input_text, output_text
def __lowerCAmelCase ( self : int ) -> Dict:
'''simple docstring'''
a__ : Optional[int] = self.tokenizer_class(self.vocab_file )
a__ : List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [7, 4, 5, 1_0, 8, 9] )
def __lowerCAmelCase ( self : Any ) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__ : Union[str, Any] = self.get_tokenizer()
a__ : Union[str, Any] = self.get_rust_tokenizer()
a__ : str = '''I was born in 92000, and this is falsé.'''
a__ : Tuple = tokenizer.tokenize(A__ )
a__ : Tuple = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
a__ : Optional[int] = tokenizer.encode(A__ , add_special_tokens=A__ )
a__ : Optional[Any] = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
a__ : List[str] = self.get_rust_tokenizer()
a__ : str = tokenizer.encode(A__ )
a__ : int = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
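# Hedged usage sketch mirroring the assertions above (ids follow the toy vocab order):
#   tokenizer = LxmertTokenizer(vocab_file)
#   tokenizer.tokenize('''UNwant\u00E9d,running''' )
#   # -> ['un', '##want', '##ed', ',', 'runn', '##ing'], ids [7, 4, 5, 10, 8, 9]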
| 688 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : str ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
a__ : str = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
sd_pipe.set_scheduler('''sample_euler''' )
a__ : List[str] = '''A painting of a squirrel eating a burger'''
a__ : Optional[Any] = torch.manual_seed(0 )
a__ : List[str] = sd_pipe([prompt] , generator=A__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='''np''' )
a__ : Optional[int] = output.images
a__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a__ : List[str] = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
a__ : List[str] = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
a__ : Tuple = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
sd_pipe.set_scheduler('''sample_euler''' )
a__ : List[Any] = '''A painting of a squirrel eating a burger'''
a__ : Dict = torch.manual_seed(0 )
a__ : Tuple = sd_pipe([prompt] , generator=A__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='''np''' )
a__ : Tuple = output.images
a__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a__ : List[Any] = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
'''simple docstring'''
a__ : Optional[Any] = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
a__ : int = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
a__ : Optional[Any] = '''A painting of a squirrel eating a burger'''
a__ : Union[str, Any] = torch.manual_seed(0 )
a__ : Optional[Any] = sd_pipe(
[prompt] , generator=A__ , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='''np''' , use_karras_sigmas=A__ , )
a__ : Tuple = output.images
a__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a__ : Optional[Any] = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
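# Hedged usage sketch of the pattern these tests exercise (model id taken from the tests;
# everything else is illustrative):
#   pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
#   pipe.set_scheduler('''sample_dpmpp_2m''' )  # a k-diffusion sampler name
#   image = pipe('''A painting of a squirrel eating a burger''' , num_inference_steps=15 ).images[0]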
| 688 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(F'Task {task} not supported.' )
    print(F'Building PyTorch model from configuration: {config}' )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(F'Save tokenizer files to {pytorch_dump_path}' )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''' , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
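# Hedged invocation sketch (script name and paths are placeholders):
#   python convert_tapas_checkpoint.py --task WTQ --reset_position_index_per_cell \
#       --tf_checkpoint_path /ckpts/wtq/model.ckpt --tapas_config_file /ckpts/wtq/config.json \
#       --pytorch_dump_path /out/tapas-wtq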
| 688 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head( state_dict ):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = '''datasets/huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''target_encoder''']
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors='''pt''' )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1E-4 )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
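# Hedged invocation sketch (script name and output path are placeholders; the URL is the
# script's own default):
#   python convert_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path /out/vit-msn-small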
| 688 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
__SCREAMING_SNAKE_CASE = {
'google/fnet-base': 5_1_2,
'google/fnet-large': 5_1_2,
}
__SCREAMING_SNAKE_CASE = '▁'
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "token_type_ids"]
__UpperCamelCase = FNetTokenizer
def __init__( self : Any , A__ : Any=None , A__ : int=None , A__ : List[str]=False , A__ : int=True , A__ : str=True , A__ : List[Any]="<unk>" , A__ : Dict="[SEP]" , A__ : List[str]="<pad>" , A__ : Union[str, Any]="[CLS]" , A__ : Dict="[MASK]" , **A__ : Tuple , ) -> List[str]:
'''simple docstring'''
a__ : Optional[int] = (
AddedToken(A__ , lstrip=A__ , rstrip=A__ , normalized=A__ )
if isinstance(A__ , A__ )
else mask_token
)
super().__init__(
A__ , tokenizer_file=A__ , do_lower_case=A__ , remove_space=A__ , keep_accents=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , **A__ , )
a__ : Optional[Any] = do_lower_case
a__ : Dict = remove_space
a__ : List[Any] = keep_accents
a__ : Optional[Any] = vocab_file
a__ : Any = False if not self.vocab_file else True
def __lowerCAmelCase ( self : str , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : Optional[int] = [self.sep_token_id]
a__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCAmelCase ( self : List[Any] , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : Dict = [self.sep_token_id]
a__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : Tuple , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ : Union[str, Any] = os.path.join(
A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ):
copyfile(self.vocab_file , A__ )
return (out_vocab_file,)
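# Hedged note on the special-token layout implemented above:
#   single sequence:    [CLS] X [SEP]
#   pair of sequences:  [CLS] A [SEP] B [SEP]
# and the token-type ids mark the first segment (including both leading specials) with 0s
# and the second segment with 1s, matching the returns above.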
| 688 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal( u : float , p : int ) -> float:
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main( ):
    n = int(input('''enter the numbers of values: ''' ) )
    y : list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(0.0 )
    print('''enter the values of parameters in a list: ''' )
    x = list(map(int , input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('''enter the value to interpolate: ''' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
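# Hedged worked example: with x = [0, 1, 2, 3], first y-column [1, 2, 4, 8] and value = 1.5,
# the forward differences are 1, 2, 4 / 1, 2 / 1, u = (1.5 - 0) / (1 - 0) = 1.5, and the
# series evaluates to 1 + 1.5*1 + (1.5*0.5/2!)*1 + (1.5*0.5*(-0.5)/3!)*1 = 2.8125.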
| 688 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
__SCREAMING_SNAKE_CASE = {
'distilbert-base-uncased': 5_1_2,
'distilbert-base-uncased-distilled-squad': 5_1_2,
'distilbert-base-cased': 5_1_2,
'distilbert-base-cased-distilled-squad': 5_1_2,
'distilbert-base-german-cased': 5_1_2,
'distilbert-base-multilingual-cased': 5_1_2,
}
__SCREAMING_SNAKE_CASE = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = ["input_ids", "attention_mask"]
__UpperCamelCase = DistilBertTokenizer
def __init__( self : str , A__ : Optional[Any]=None , A__ : Any=None , A__ : Tuple=True , A__ : List[Any]="[UNK]" , A__ : List[str]="[SEP]" , A__ : Tuple="[PAD]" , A__ : Optional[int]="[CLS]" , A__ : Union[str, Any]="[MASK]" , A__ : List[str]=True , A__ : Any=None , **A__ : int , ) -> str:
'''simple docstring'''
super().__init__(
A__ , tokenizer_file=A__ , do_lower_case=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , tokenize_chinese_chars=A__ , strip_accents=A__ , **A__ , )
a__ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , A__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , A__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , A__ ) != tokenize_chinese_chars
):
a__ : int = getattr(A__ , normalizer_state.pop('''type''' ) )
a__ : List[Any] = do_lower_case
a__ : str = strip_accents
a__ : List[str] = tokenize_chinese_chars
a__ : Dict = normalizer_class(**A__ )
a__ : List[Any] = do_lower_case
def __lowerCAmelCase ( self : Tuple , A__ : List[str] , A__ : Dict=None ) -> List[str]:
'''simple docstring'''
a__ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self : int , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : List[str] = [self.sep_token_id]
a__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : str , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
a__ : int = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
| 688 | 1 |
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared( ciphertext : str , cipher_alphabet : list[str] | None = None , frequencies_dict : dict[str, float] | None = None , case_sensitive : bool = False , ):
    alphabet_letters = cipher_alphabet or [chr(i ) for i in range(97 , 123 )]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the English language (how much they show up)
        frequencies = {
            '''a''': 0.08497,
            '''b''': 0.01492,
            '''c''': 0.02202,
            '''d''': 0.04253,
            '''e''': 0.11162,
            '''f''': 0.02228,
            '''g''': 0.02015,
            '''h''': 0.06094,
            '''i''': 0.07546,
            '''j''': 0.00153,
            '''k''': 0.01292,
            '''l''': 0.04025,
            '''m''': 0.02406,
            '''n''': 0.06749,
            '''o''': 0.07507,
            '''p''': 0.01929,
            '''q''': 0.00095,
            '''r''': 0.07587,
            '''s''': 0.06327,
            '''t''': 0.09356,
            '''u''': 0.02758,
            '''v''': 0.00978,
            '''w''': 0.02560,
            '''x''': 0.00150,
            '''y''': 0.01994,
            '''z''': 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values : dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift = ''''''
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key : int ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
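# Hedged usage sketch (illustrative ciphertext; no fixed output asserted here):
#   shift, chi_squared, plaintext = decrypt_caesar_with_chi_squared('''crybd cdbsxq''' )
#   # `shift` is the key with the smallest chi-squared statistic, `plaintext` the decoding.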
| 688 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free paths whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__SCREAMING_SNAKE_CASE = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__SCREAMING_SNAKE_CASE = tuple[int, int]
class Node:
"""simple docstring"""
def __init__( self : str , A__ : int , A__ : int , A__ : int , A__ : int , A__ : int , A__ : Node | None , ) -> None:
'''simple docstring'''
a__ : Optional[int] = pos_x
a__ : str = pos_y
a__ : Optional[int] = (pos_y, pos_x)
a__ : List[str] = goal_x
a__ : Any = goal_y
a__ : Any = g_cost
a__ : Optional[int] = parent
a__ : Union[str, Any] = self.calculate_heuristic()
a__ : List[Any] = self.g_cost + self.h_cost
def __lowerCAmelCase ( self : Union[str, Any] ) -> float:
'''simple docstring'''
a__ : List[str] = self.pos_x - self.goal_x
a__ : List[str] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(A__ ) + abs(A__ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : List[Any] , A__ : Node ) -> bool:
'''simple docstring'''
return self.f_cost < other.f_cost
class AStar:
"""simple docstring"""
def __init__( self : Optional[int] , A__ : TPosition , A__ : TPosition ) -> Optional[Any]:
'''simple docstring'''
a__ : int = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , A__ )
a__ : Dict = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , A__ )
a__ : Dict = [self.start]
a__ : list[Node] = []
a__ : str = False
def __lowerCAmelCase ( self : List[str] ) -> list[TPosition]:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
a__ : Dict = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(A__ )
self.closed_nodes.append(A__ )
a__ : List[Any] = self.get_successors(A__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(A__ )
else:
# retrieve the best current path
a__ : Optional[int] = self.open_nodes.pop(self.open_nodes.index(A__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(A__ )
else:
self.open_nodes.append(A__ )
return [self.start.pos]
def __lowerCAmelCase ( self : Optional[Any] , A__ : Node ) -> list[Node]:
'''simple docstring'''
a__ : Optional[int] = []
for action in delta:
a__ : List[Any] = parent.pos_x + action[1]
a__ : Tuple = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
A__ , A__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , A__ , ) )
return successors
def __lowerCAmelCase ( self : List[Any] , A__ : Node | None ) -> list[TPosition]:
'''simple docstring'''
a__ : Union[str, Any] = node
a__ : Optional[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
a__ : Any = current_node.parent
path.reverse()
return path
class BidirectionalAStar:
"""simple docstring"""
def __init__( self : List[Any] , A__ : TPosition , A__ : TPosition ) -> None:
'''simple docstring'''
a__ : str = AStar(A__ , A__ )
a__ : Optional[int] = AStar(A__ , A__ )
a__ : List[str] = False
def __lowerCAmelCase ( self : Tuple ) -> list[TPosition]:
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
a__ : int = self.fwd_astar.open_nodes.pop(0 )
a__ : List[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
A__ , A__ )
self.fwd_astar.closed_nodes.append(A__ )
self.bwd_astar.closed_nodes.append(A__ )
a__ : Tuple = current_bwd_node
a__ : Optional[int] = current_fwd_node
a__ : Optional[int] = {
self.fwd_astar: self.fwd_astar.get_successors(A__ ),
self.bwd_astar: self.bwd_astar.get_successors(A__ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(A__ )
else:
# retrieve the best current path
a__ : Optional[Any] = astar.open_nodes.pop(
astar.open_nodes.index(A__ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(A__ )
else:
astar.open_nodes.append(A__ )
return [self.fwd_astar.start.pos]
def __lowerCAmelCase ( self : List[str] , A__ : Node , A__ : Node ) -> list[TPosition]:
'''simple docstring'''
a__ : str = self.fwd_astar.retrace_path(A__ )
a__ : List[str] = self.bwd_astar.retrace_path(A__ )
bwd_path.pop()
bwd_path.reverse()
a__ : Optional[int] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__SCREAMING_SNAKE_CASE = (0, 0)
__SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__SCREAMING_SNAKE_CASE = time.time()
__SCREAMING_SNAKE_CASE = AStar(init, goal)
__SCREAMING_SNAKE_CASE = a_star.search()
__SCREAMING_SNAKE_CASE = time.time() - start_time
print(f'AStar execution time = {end_time:f} seconds')
__SCREAMING_SNAKE_CASE = time.time()
__SCREAMING_SNAKE_CASE = BidirectionalAStar(init, goal)
__SCREAMING_SNAKE_CASE = time.time() - bd_start_time
print(f'BidirectionalAStar execution time = {bd_end_time:f} seconds')
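# Hedged note: with HEURISTIC = 1 the Manhattan distance |dx| + |dy| is used, which is
# admissible on this 4-connected grid; HEURISTIC = 0 switches to the Euclidean
# sqrt(dx**2 + dy**2) computed by the heuristic method above.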
| 688 | 1 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search( graph : dict , start : str ) -> set[str]:
    explored, stack = set(start ), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
__SCREAMING_SNAKE_CASE = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
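# Hedged note: starting from 'A' every vertex of G is reachable, so the printed set is
# {'A', 'B', 'C', 'D', 'E', 'F', 'G'} (Python set ordering is arbitrary).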
| 688 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch( gpt2_checkpoint_path , gpt2_config_file , pytorch_dump_folder_path ):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file )
    model = GPT2Model(config )
    # Load weights from numpy
    load_tf_weights_in_gpt2(model , config , gpt2_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
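# Hedged invocation sketch (script name and paths are placeholders):
#   python convert_gpt2_checkpoint.py --gpt2_checkpoint_path /ckpts/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /out/gpt2 --gpt2_config_file /ckpts/gpt2/config.json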
| 688 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = '▁'
__SCREAMING_SNAKE_CASE = {'vocab_file': 'sentencepiece.bpe.model'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
__SCREAMING_SNAKE_CASE = {
'facebook/mbart-large-en-ro': 1_0_2_4,
'facebook/mbart-large-cc25': 1_0_2_4,
}
# fmt: off
__SCREAMING_SNAKE_CASE = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = ["input_ids", "attention_mask"]
__UpperCamelCase = []
__UpperCamelCase = []
def __init__( self : Any , A__ : List[str] , A__ : Dict="<s>" , A__ : Optional[int]="</s>" , A__ : int="</s>" , A__ : Any="<s>" , A__ : Any="<unk>" , A__ : Dict="<pad>" , A__ : Optional[int]="<mask>" , A__ : int=None , A__ : str=None , A__ : List[str]=None , A__ : Optional[Dict[str, Any]] = None , A__ : Any=None , **A__ : Dict , ) -> List[str]:
'''simple docstring'''
a__ : Optional[int] = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else mask_token
a__ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A__ , eos_token=A__ , unk_token=A__ , sep_token=A__ , cls_token=A__ , pad_token=A__ , mask_token=A__ , tokenizer_file=A__ , src_lang=A__ , tgt_lang=A__ , additional_special_tokens=A__ , sp_model_kwargs=self.sp_model_kwargs , **A__ , )
a__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A__ ) )
a__ : Any = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
a__ : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
a__ : Union[str, Any] = 1
a__ : int = len(self.sp_model )
a__ : List[str] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(A__ )
}
a__ : List[str] = {v: k for k, v in self.lang_code_to_id.items()}
a__ : str = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
a__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
a__ : Optional[int] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
a__ : List[Any] = src_lang if src_lang is not None else '''en_XX'''
a__ : Optional[Any] = self.lang_code_to_id[self._src_lang]
a__ : List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[str] ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] = self.__dict__.copy()
a__ : Any = None
a__ : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Any , A__ : Optional[Any] ) -> Any:
'''simple docstring'''
a__ : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a__ : Any = {}
a__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __lowerCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __lowerCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __lowerCAmelCase ( self : Tuple , A__ : str ) -> None:
'''simple docstring'''
a__ : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowerCAmelCase ( self : int , A__ : List[int] , A__ : Optional[List[int]] = None , A__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A__ , token_ids_a=A__ , already_has_special_tokens=A__ )
a__ : str = [1] * len(self.prefix_tokens )
a__ : Optional[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A__ )) + suffix_ones
return prefix_ones + ([0] * len(A__ )) + ([0] * len(A__ )) + suffix_ones
def __lowerCAmelCase ( self : Tuple , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowerCAmelCase ( self : Tuple , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : Optional[Any] = [self.sep_token_id]
a__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self : Optional[Any] , A__ : Tuple , A__ : str , A__ : Optional[str] , A__ : Optional[str] , **A__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
a__ : Dict = src_lang
a__ : Optional[int] = self(A__ , add_special_tokens=A__ , return_tensors=A__ , **A__ )
a__ : Optional[Any] = self.convert_tokens_to_ids(A__ )
a__ : Optional[int] = tgt_lang_id
return inputs
def __lowerCAmelCase ( self : int ) -> str:
'''simple docstring'''
a__ : Optional[Any] = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self : Any , A__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(A__ , out_type=A__ )
def __lowerCAmelCase ( self : List[Any] , A__ : List[str] ) -> Tuple:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a__ : Optional[Any] = self.sp_model.PieceToId(A__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self : Optional[int] , A__ : Tuple ) -> List[Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self : Optional[Any] , A__ : List[str] ) -> Dict:
'''simple docstring'''
a__ : str = ''''''.join(A__ ).replace(A__ , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self : List[Any] , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ : int = os.path.join(
A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A__ )
elif not os.path.isfile(self.vocab_file ):
with open(A__ , '''wb''' ) as fi:
a__ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(A__ )
return (out_vocab_file,)
def __lowerCAmelCase ( self : Optional[Any] , A__ : List[str] , A__ : str = "en_XX" , A__ : Optional[List[str]] = None , A__ : str = "ro_RO" , **A__ : Optional[Any] , ) -> BatchEncoding:
'''simple docstring'''
a__ : List[str] = src_lang
a__ : List[str] = tgt_lang
return super().prepare_seqaseq_batch(A__ , A__ , **A__ )
def __lowerCAmelCase ( self : str ) -> List[str]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowerCAmelCase ( self : str , A__ : Tuple ) -> None:
'''simple docstring'''
a__ : int = self.lang_code_to_id[src_lang]
a__ : int = []
a__ : Any = [self.eos_token_id, self.cur_lang_code]
def __lowerCAmelCase ( self : Optional[int] , A__ : str ) -> None:
'''simple docstring'''
a__ : str = self.lang_code_to_id[lang]
a__ : Optional[Any] = []
a__ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
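# A minimal usage sketch against the public transformers API (class and checkpoint
# names are assumed from the upstream MBartTokenizer this file mirrors, not taken
# from the identifiers above):
#   tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
# With the source-language special tokens set above, encoded sequences carry no
# prefix and end with [eos_token_id, src_lang_code].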
| 688 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
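# The three flags above are read below as do_only_config, do_only_weights and
# do_only_renaming (presumably in that order): dump only the translated config,
# only the renamed state dict, or only apply the config key renames.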
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__SCREAMING_SNAKE_CASE = parser.parse_args()
__SCREAMING_SNAKE_CASE = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
__SCREAMING_SNAKE_CASE = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
__SCREAMING_SNAKE_CASE = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
__SCREAMING_SNAKE_CASE = reader.read()
__SCREAMING_SNAKE_CASE = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
__SCREAMING_SNAKE_CASE = UNetaDModel(**config)
else:
__SCREAMING_SNAKE_CASE = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
__SCREAMING_SNAKE_CASE = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__SCREAMING_SNAKE_CASE = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__SCREAMING_SNAKE_CASE = config[key]
del config[key]
__SCREAMING_SNAKE_CASE = [k.replace('UNetRes', '') for k in config['down_block_types']]
__SCREAMING_SNAKE_CASE = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
__SCREAMING_SNAKE_CASE = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
__SCREAMING_SNAKE_CASE = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
__SCREAMING_SNAKE_CASE = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
__SCREAMING_SNAKE_CASE = param_value
__SCREAMING_SNAKE_CASE = True
if not has_changed:
__SCREAMING_SNAKE_CASE = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 688 | 1 |
'''simple docstring'''
from itertools import permutations
def __a ( lowerCAmelCase__ : tuple ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
a__ : Dict = [7, 11, 13, 17]
for i, test in enumerate(lowerCAmelCase__ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
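# The checks above encode Project Euler 43's substring-divisibility property for a
# 0-9 pandigital tuple d1..d10: d2d3d4 must be divisible by 2, d3d4d5 by 3,
# d4d5d6 by 5, and d5d6d7 / d6d7d8 / d7d8d9 / d8d9d10 by 7, 11, 13 and 17.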
def __a ( lowerCAmelCase__ : int = 10 ):
return sum(
int(''''''.join(map(lowerCAmelCase__ , lowerCAmelCase__ ) ) )
for num in permutations(range(lowerCAmelCase__ ) )
if is_substring_divisible(lowerCAmelCase__ ) )
if __name__ == "__main__":
print(f'{solution() = }')
| 688 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
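# Each full-loop test below runs the scheduler through all 10 inference steps and
# compares the summed/averaged absolute output against reference values; the
# expected numbers differ slightly per device, hence the CPU/MPS and CUDA branches.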
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = (KDPMaDiscreteScheduler,)
__UpperCamelCase = 10
def __lowerCAmelCase ( self : Optional[Any] , **A__ : Optional[int] ) -> int:
'''simple docstring'''
a__ : Optional[int] = {
'''num_train_timesteps''': 1_1_0_0,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**A__ )
return config
def __lowerCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A__ )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=A__ , beta_end=A__ )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A__ )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A__ )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
'''simple docstring'''
a__ : Any = self.scheduler_classes[0]
a__ : str = self.get_scheduler_config(prediction_type='''v_prediction''' )
a__ : Dict = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps )
a__ : Tuple = self.dummy_model()
a__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ : Dict = sample.to(A__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : Optional[Any] = scheduler.scale_model_input(A__ , A__ )
a__ : Union[str, Any] = model(A__ , A__ )
a__ : List[str] = scheduler.step(A__ , A__ , A__ )
a__ : Optional[Any] = output.prev_sample
a__ : Tuple = torch.sum(torch.abs(A__ ) )
a__ : Optional[int] = torch.mean(torch.abs(A__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_4286_5017_0972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_002 ) < 1E-3
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
if torch_device == "mps":
return
a__ : List[Any] = self.scheduler_classes[0]
a__ : Tuple = self.get_scheduler_config()
a__ : Tuple = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps )
a__ : List[Any] = self.dummy_model()
a__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ : Any = sample.to(A__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : str = scheduler.scale_model_input(A__ , A__ )
a__ : List[str] = model(A__ , A__ )
a__ : str = scheduler.step(A__ , A__ , A__ )
a__ : List[Any] = output.prev_sample
a__ : Dict = torch.sum(torch.abs(A__ ) )
a__ : Optional[Any] = torch.mean(torch.abs(A__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
def __lowerCAmelCase ( self : str ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
a__ : Optional[int] = self.scheduler_classes[0]
a__ : Tuple = self.get_scheduler_config()
a__ : List[Any] = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps , device=A__ )
a__ : Union[str, Any] = self.dummy_model()
a__ : List[Any] = self.dummy_sample_deter.to(A__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
a__ : Optional[int] = scheduler.scale_model_input(A__ , A__ )
a__ : List[Any] = model(A__ , A__ )
a__ : Any = scheduler.step(A__ , A__ , A__ )
a__ : List[str] = output.prev_sample
a__ : Any = torch.sum(torch.abs(A__ ) )
a__ : Union[str, Any] = torch.mean(torch.abs(A__ ) )
if str(A__ ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
| 688 | 1 |
'''simple docstring'''
from math import pi, sqrt, tan
def __a ( lowerCAmelCase__ : float ):
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def __a ( lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __a ( lowerCAmelCase__ : float ):
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def __a ( lowerCAmelCase__ : float ):
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def __a ( lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def __a ( lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
a__ : List[str] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
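# surface_area_conical_frustum uses the slant height l = sqrt(h**2 + (r1 - r2)**2):
# lateral area pi * l * (r1 + r2) plus the two circular faces pi * r1**2 + pi * r2**2.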
def __a ( lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def __a ( lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(lowerCAmelCase__ , 2 ) * torus_radius * tube_radius
def __a ( lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def __a ( lowerCAmelCase__ : float ):
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def __a ( lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def __a ( lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
a__ : int = (sidea + sidea + sidea) / 2
a__ : Union[str, Any] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
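# area_triangle_three_sides is Heron's formula: with s = (a + b + c) / 2,
# area = sqrt(s * (s - a) * (s - b) * (s - c)).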
def __a ( lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def __a ( lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def __a ( lowerCAmelCase__ : float ):
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def __a ( lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def __a ( lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def __a ( lowerCAmelCase__ : int , lowerCAmelCase__ : float ):
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f'Rectangle: {area_rectangle(1_0, 2_0) = }')
print(f'Square: {area_square(1_0) = }')
print(f'Triangle: {area_triangle(1_0, 1_0) = }')
print(f'Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }')
print(f'Parallelogram: {area_parallelogram(1_0, 2_0) = }')
print(f'Rhombus: {area_rhombus(1_0, 2_0) = }')
print(f'Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }')
print(f'Circle: {area_circle(2_0) = }')
print(f'Ellipse: {area_ellipse(1_0, 2_0) = }')
print('\nSurface Areas of various geometric shapes: \n')
print(f'Cube: {surface_area_cube(2_0) = }')
print(f'Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }')
print(f'Sphere: {surface_area_sphere(2_0) = }')
print(f'Hemisphere: {surface_area_hemisphere(2_0) = }')
print(f'Cone: {surface_area_cone(1_0, 2_0) = }')
print(f'Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }')
print(f'Cylinder: {surface_area_cylinder(1_0, 2_0) = }')
print(f'Torus: {surface_area_torus(2_0, 1_0) = }')
print(f'Equilateral Triangle: {area_reg_polygon(3, 1_0) = }')
print(f'Square: {area_reg_polygon(4, 1_0) = }')
    print(f'Regular Pentagon: {area_reg_polygon(5, 1_0) = }')
| 688 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
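# get_aligned_output_features_output_indices fills in whichever of out_features /
# out_indices is missing so that both describe the same backbone stages; when both
# are None it falls back to the last stage, as the first test below verifies.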
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
a__ : str = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
a__ , a__ : List[Any] = get_aligned_output_features_output_indices(A__ , A__ , A__ )
self.assertEqual(A__ , ['''c'''] )
self.assertEqual(A__ , [2] )
# Out indices set to match out features
a__ , a__ : Optional[int] = get_aligned_output_features_output_indices(['''a''', '''c'''] , A__ , A__ )
self.assertEqual(A__ , ['''a''', '''c'''] )
self.assertEqual(A__ , [0, 2] )
# Out features set to match out indices
a__ , a__ : int = get_aligned_output_features_output_indices(A__ , [0, 2] , A__ )
self.assertEqual(A__ , ['''a''', '''c'''] )
self.assertEqual(A__ , [0, 2] )
# Out features selected from negative indices
a__ , a__ : List[str] = get_aligned_output_features_output_indices(A__ , [-3, -1] , A__ )
self.assertEqual(A__ , ['''a''', '''c'''] )
self.assertEqual(A__ , [-3, -1] )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , A__ )
# Out features must be a list
with self.assertRaises(A__ ):
verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] )
# Out features must be a subset of stage names
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] )
# Out indices must be a list or tuple
with self.assertRaises(A__ ):
verify_out_features_out_indices(A__ , 0 , ['''a''', '''b'''] )
# Out indices must be a subset of stage names
with self.assertRaises(A__ ):
verify_out_features_out_indices(A__ , (0, 1) , ['''a'''] )
# Out features and out indices must be the same length
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] )
# Out features should match out indices
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] )
# Out features and out indices should be in order
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] )
# Check passes with valid inputs
verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] )
def __lowerCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
a__ : Optional[Any] = BackboneMixin()
a__ : int = ['''a''', '''b''', '''c''']
a__ : List[Any] = ['''a''', '''c''']
a__ : Tuple = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
a__ : Dict = ['''a''', '''b''']
self.assertEqual(backbone.out_features , ['''a''', '''b'''] )
self.assertEqual(backbone.out_indices , [0, 1] )
a__ : int = [-3, -1]
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 688 | 1 |
'''simple docstring'''
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def __a ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any] ):
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
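# Each test below round-trips the same 4-row, 3-column toy dataset through SQLite;
# tmp_path is pytest's built-in fixture, while sqlite_path is assumed to be
# provided by the test suite's conftest.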
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __a ( lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str ):
a__ : Any = tmp_path / '''cache'''
a__ : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a__ : Optional[Any] = SqlDatasetReader(
'''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_sql_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __a ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : str ):
a__ : Union[str, Any] = tmp_path / '''cache'''
a__ : int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
a__ : Optional[Any] = features.copy() if features else default_expected_features
a__ : Tuple = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
a__ : Dict = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_sql_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( lowerCAmelCase__ : List[str] ):
with contextlib.closing(sqlitea.connect(lowerCAmelCase__ ) ) as con:
a__ : Tuple = con.cursor()
cur.execute('''SELECT * FROM dataset''' )
for row in cur:
yield row
@require_sqlalchemy
def __a ( lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any] ):
a__ : Any = tmp_path / '''cache'''
a__ : int = os.path.join(lowerCAmelCase__ , '''tmp.sql''' )
a__ : int = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=lowerCAmelCase__ ).read()
SqlDatasetWriter(lowerCAmelCase__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write()
a__ : List[Any] = iter_sql_file(lowerCAmelCase__ )
a__ : Tuple = iter_sql_file(lowerCAmelCase__ )
for rowa, rowa in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
assert rowa == rowa
@require_sqlalchemy
def __a ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple ):
a__ : Tuple = tmp_path / '''cache'''
a__ : Dict = os.path.join(lowerCAmelCase__ , '''tmp.sql''' )
a__ : Tuple = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=lowerCAmelCase__ ).read()
SqlDatasetWriter(lowerCAmelCase__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write()
a__ : Dict = iter_sql_file(lowerCAmelCase__ )
a__ : int = iter_sql_file(lowerCAmelCase__ )
for rowa, rowa in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
assert rowa == rowa
@require_sqlalchemy
def __a ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict ):
a__ : str = tmp_path / '''cache'''
a__ : Tuple = os.path.join(lowerCAmelCase__ , '''tmp.sql''' )
a__ : List[str] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=lowerCAmelCase__ ).read()
with pytest.raises(lowerCAmelCase__ ):
SqlDatasetWriter(lowerCAmelCase__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
| 688 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def __a ( lowerCAmelCase__ : List[Any] ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def __a ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any ):
a__ : Dict = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
a__ : Any = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' )
a__ : int = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' )
a__ : Optional[Any] = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' )
a__ : Dict = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' )
a__ : List[str] = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' )
a__ : List[Any] = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' )
a__ : str = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' )
a__ : List[Any] = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' )
a__ : List[Any] = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' )
a__ : str = key.replace('''image_encoder.module''' , '''flava.image_model''' )
a__ : Dict = key.replace('''text_encoder.module''' , '''flava.text_model''' )
a__ : List[Any] = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' )
a__ : List[str] = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' )
a__ : List[str] = key.replace('''text_projection''' , '''flava.text_projection''' )
a__ : Any = key.replace('''image_projection''' , '''flava.image_projection''' )
a__ : Any = value.float()
for key, value in codebook_state_dict.items():
a__ : List[str] = value
return upgrade
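# upgrade_state_dict maps checkpoint keys from the original FLAVA layout onto the
# transformers layout (heads.* -> *_head, *_encoder.module -> flava.*_model, ...)
# and folds the converted DALL-E codebook weights into the same state dict.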
@torch.no_grad()
def __a ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict=None ):
if config_path is not None:
a__ : Tuple = FlavaConfig.from_pretrained(lowerCAmelCase__ )
else:
a__ : Optional[int] = FlavaConfig()
a__ : List[Any] = FlavaForPreTraining(lowerCAmelCase__ ).eval()
a__ : Optional[int] = convert_dalle_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , save_checkpoint=lowerCAmelCase__ )
if os.path.exists(lowerCAmelCase__ ):
a__ : List[str] = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
else:
a__ : Dict = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location='''cpu''' )
a__ : List[Any] = upgrade_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
hf_model.load_state_dict(lowerCAmelCase__ )
a__ : Any = hf_model.state_dict()
a__ : Optional[Any] = count_parameters(lowerCAmelCase__ )
a__ : int = count_parameters(lowerCAmelCase__ ) + count_parameters(lowerCAmelCase__ )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 )
hf_model.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 688 | 1 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__SCREAMING_SNAKE_CASE = 1e-4
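# 1e-4 is the absolute tolerance used for output-slice comparisons in the slow
# integration tests at the end of this file.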
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : Dict , A__ : Union[str, Any] , A__ : str=1_6 , A__ : List[str]=1_3 , A__ : int=7 , A__ : Optional[int]=1_4 , A__ : int=1_0 , A__ : Any=1_9 , A__ : str=5 , A__ : Any=4 , A__ : List[Any]=True , A__ : Optional[int]=1_6 , A__ : Tuple=2 , A__ : Tuple=4 , A__ : Dict=4 , A__ : int="gelu" , A__ : Optional[Any]=0.1 , A__ : Dict=0.1 , A__ : List[str]=[1, 2, 3, 4, 5] , A__ : Dict=2_5 , A__ : Union[str, Any]=5 , ) -> Tuple:
'''simple docstring'''
a__ : str = d_model
a__ : List[Any] = parent
a__ : Tuple = batch_size
a__ : Dict = prediction_length
a__ : Optional[Any] = context_length
a__ : List[Any] = cardinality
a__ : str = num_time_features
a__ : Optional[Any] = lags_sequence
a__ : Optional[int] = embedding_dimension
a__ : List[str] = is_training
a__ : Dict = hidden_size
a__ : str = num_hidden_layers
a__ : Optional[Any] = num_attention_heads
a__ : Optional[Any] = intermediate_size
a__ : Optional[int] = hidden_act
a__ : int = hidden_dropout_prob
a__ : Any = attention_probs_dropout_prob
a__ : List[str] = context_length
a__ : Any = prediction_length + label_length
a__ : Any = label_length
a__ : List[Any] = moving_average
a__ : List[Any] = autocorrelation_factor
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __lowerCAmelCase ( self : List[str] , A__ : Any ) -> Union[str, Any]:
'''simple docstring'''
a__ : Union[str, Any] = config.context_length + max(config.lags_sequence )
a__ : List[str] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
a__ : Dict = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
a__ : int = floats_tensor([self.batch_size, _past_length] )
a__ : Optional[int] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
a__ : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
a__ : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length] )
a__ : Union[str, Any] = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
a__ : Optional[int] = self.get_config()
a__ : List[str] = self.prepare_autoformer_inputs_dict(A__ )
return config, inputs_dict
def __lowerCAmelCase ( self : int ) -> Any:
'''simple docstring'''
a__ , a__ : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCAmelCase ( self : List[Any] , A__ : List[Any] , A__ : str ) -> Union[str, Any]:
'''simple docstring'''
a__ : List[Any] = AutoformerModel(config=A__ ).to(A__ ).eval()
a__ : List[str] = model(**A__ )
a__ : Union[str, Any] = outputs.encoder_last_hidden_state
a__ : Dict = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : List[Any] = model.get_encoder()
encoder.save_pretrained(A__ )
a__ : Tuple = AutoformerEncoder.from_pretrained(A__ ).to(A__ )
a__ , a__ , a__ , a__ , a__ : Dict = model.create_network_inputs(**A__ )
a__ , a__ : int = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
a__ : Dict = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
a__ : Optional[int] = encoder(inputs_embeds=A__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
a__ : int = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
a__ : str = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
a__ : List[str] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
a__ : List[Any] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : List[str] = model.get_decoder()
decoder.save_pretrained(A__ )
a__ : Union[str, Any] = AutoformerDecoder.from_pretrained(A__ ).to(A__ )
a__ : Tuple = decoder(
trend=A__ , inputs_embeds=A__ , encoder_hidden_states=A__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowerCAmelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__UpperCamelCase = (AutoformerForPrediction,) if is_torch_available() else ()
__UpperCamelCase = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def __lowerCAmelCase ( self : Optional[Any] ) -> Any:
'''simple docstring'''
a__ : Optional[int] = AutoformerModelTester(self )
a__ : Optional[int] = ConfigTester(self , config_class=A__ , has_text_modality=A__ )
def __lowerCAmelCase ( self : int ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
a__ , a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
a__ : Dict = model_class(A__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A__ )
a__ , a__ : Dict = model_class.from_pretrained(A__ , output_loading_info=A__ )
self.assertEqual(info['''missing_keys'''] , [] )
def __lowerCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
a__ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*A__ )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
a__ : Tuple = inspect.signature(getattr(A__ , '''forward''' ) )
# The main input is the name of the argument after `self`
a__ : str = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , A__ )
def __lowerCAmelCase ( self : Dict ) -> List[str]:
'''simple docstring'''
a__ , a__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : int = model_class(A__ )
a__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Dict = [*signature.parameters.keys()]
a__ : Optional[Any] = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(A__ )] , A__ )
def __lowerCAmelCase ( self : Tuple ) -> Any:
'''simple docstring'''
a__ , a__ : int = self.model_tester.prepare_config_and_inputs_for_common()
a__ : List[str] = True
a__ : List[Any] = getattr(self.model_tester , '''seq_length''' , A__ )
a__ : Any = getattr(self.model_tester , '''decoder_seq_length''' , A__ )
a__ : str = getattr(self.model_tester , '''encoder_seq_length''' , A__ )
a__ : Any = getattr(self.model_tester , '''d_model''' , A__ )
a__ : Union[str, Any] = getattr(self.model_tester , '''num_attention_heads''' , A__ )
a__ : Tuple = d_model // num_attention_heads
for model_class in self.all_model_classes:
a__ : int = True
a__ : str = False
a__ : Any = True
a__ : Dict = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
a__ : Dict = model(**self._prepare_for_class(A__ , A__ ) )
a__ : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a__ : int = True
a__ : int = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
a__ : int = model(**self._prepare_for_class(A__ , A__ ) )
a__ : str = outputs.encoder_attentions
self.assertEqual(len(A__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
a__ : Tuple = len(A__ )
a__ : str = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(A__ , A__ )
# decoder attentions
a__ : List[str] = outputs.decoder_attentions
self.assertIsInstance(A__ , (list, tuple) )
self.assertEqual(len(A__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
a__ : Tuple = outputs.cross_attentions
self.assertIsInstance(A__ , (list, tuple) )
self.assertEqual(len(A__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
a__ : Dict = True
a__ : int = True
a__ : int = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
a__ : Optional[int] = model(**self._prepare_for_class(A__ , A__ ) )
self.assertEqual(out_len + 2 , len(A__ ) )
a__ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def __a ( lowerCAmelCase__ : Dict="train-batch.pt" ):
a__ : str = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=lowerCAmelCase__ , repo_type='''dataset''' )
a__ : int = torch.load(lowerCAmelCase__ , map_location=lowerCAmelCase__ )
return batch
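# prepare_batch fetches a pre-built tourism-monthly batch from the Hub dataset
# repo hf-internal-testing/tourism-monthly-batch and deserializes it with
# torch.load for the integration tests below.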
@require_torch
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
a__ : Tuple = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A__ )
a__ : Union[str, Any] = prepare_batch()
with torch.no_grad():
a__ : Tuple = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
a__ : Union[str, Any] = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , A__ )
a__ : Union[str, Any] = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=A__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , A__ , atol=A__ ) )
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
a__ : Any = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A__ )
a__ : str = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
a__ : Any = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
a__ : List[Any] = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , A__ )
a__ : List[Any] = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=A__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , A__ , atol=A__ ) )
def __lowerCAmelCase ( self : Tuple ) -> Any:
'''simple docstring'''
a__ : Optional[int] = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A__ )
a__ : Union[str, Any] = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
a__ : Optional[int] = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
a__ : Dict = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , A__ )
a__ : Optional[Any] = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=A__ )
a__ : List[str] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , A__ , rtol=1E-1 ) )
| 688 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = 3
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
pass
def __a ( lowerCAmelCase__ : List[str] ):
for shard in shards:
for i in range(lowerCAmelCase__ ):
yield {"i": i, "shard": shard}
def __a ( ):
a__ : str = int(os.environ['''RANK'''] )
a__ : int = int(os.environ['''WORLD_SIZE'''] )
a__ : str = ArgumentParser()
parser.add_argument('''--streaming''' , type=lowerCAmelCase__ )
parser.add_argument('''--local_rank''' , type=lowerCAmelCase__ )
parser.add_argument('''--num_workers''' , type=lowerCAmelCase__ , default=0 )
a__ : int = parser.parse_args()
a__ : List[str] = args.streaming
a__ : Dict = args.num_workers
a__ : Dict = {'''shards''': [F'shard_{shard_idx}' for shard_idx in range(lowerCAmelCase__ )]}
a__ : Tuple = IterableDataset.from_generator(lowerCAmelCase__ , gen_kwargs=lowerCAmelCase__ )
if not streaming:
a__ : str = Dataset.from_list(list(lowerCAmelCase__ ) )
a__ : Optional[int] = split_dataset_by_node(lowerCAmelCase__ , rank=lowerCAmelCase__ , world_size=lowerCAmelCase__ )
a__ : Dict = torch.utils.data.DataLoader(lowerCAmelCase__ , num_workers=lowerCAmelCase__ )
a__ : str = NUM_SHARDS * NUM_ITEMS_PER_SHARD
a__ : Dict = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
a__ : str = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main()
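# With NUM_SHARDS = 4 and NUM_ITEMS_PER_SHARD = 3 the full dataset holds 12 rows;
# each rank expects full_size // world_size of them, plus one extra row when
# rank < full_size % world_size, which is exactly what the size check asserts.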
| 688 | 1 |
'''simple docstring'''
from collections.abc import Callable
def __a ( lowerCAmelCase__ : Callable[[float], float] , lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
a__ : float = a
a__ : float = b
    if function(lowerCAmelCase__ ) == 0:  # a is already a root of the function
return a
elif function(lowerCAmelCase__ ) == 0:
return b
elif (
function(lowerCAmelCase__ ) * function(lowerCAmelCase__ ) > 0
    ):  # if neither a nor b is a root and f(a) and f(b) share a sign,
        # then the interval does not bracket a root and this algorithm can't find one
raise ValueError('''could not find root in given interval.''' )
else:
a__ : float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until mid is within 10**-7 of an endpoint
if function(lowerCAmelCase__ ) == 0:
return mid
elif function(lowerCAmelCase__ ) * function(lowerCAmelCase__ ) < 0:
a__ : str = mid
else:
a__ : Optional[int] = mid
a__ : List[str] = start + (end - start) / 2.0
return mid
def __a ( lowerCAmelCase__ : float ):
return x**3 - 2 * x - 5
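# An illustrative call (the lambda is hypothetical, not part of this module):
#   bisection(lambda x: x**2 - 4, 1, 1_0_0_0)
# returns approximately 2.0: f(1) < 0 < f(1_000) brackets the root, and the loop
# above keeps halving the bracket until it is narrower than 10**-7.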
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
| 688 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__SCREAMING_SNAKE_CASE = open # noqa: we just need to have a builtin inside this module to test it properly
| 688 | 1 |
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [0, 2, 4, 6, 8]
__SCREAMING_SNAKE_CASE = [1, 3, 5, 7, 9]
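# Project Euler 145: n is "reversible" when every digit of n + reverse(n) is odd.
# reversible_numbers counts candidates of a given length by fixing outer digit
# pairs and working inward, threading the inter-column carry via "remainder".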
def __a ( lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : int ):
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
a__ : Union[str, Any] = 0
for digit in range(10 ):
a__ : List[Any] = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , lowerCAmelCase__ , lowerCAmelCase__ )
return result
a__ : int = 0
for digita in range(10 ):
a__ : List[str] = digita
if (remainder + digita) % 2 == 0:
a__ : Tuple = ODD_DIGITS
else:
a__ : Union[str, Any] = EVEN_DIGITS
for digita in other_parity_digits:
a__ : Tuple = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , lowerCAmelCase__ , lowerCAmelCase__ , )
return result
def __a ( lowerCAmelCase__ : int = 9 ):
a__ : Dict = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(lowerCAmelCase__ , 0 , [0] * length , lowerCAmelCase__ )
return result
if __name__ == "__main__":
print(f'{solution() = }')
| 688 |
'''simple docstring'''
import enum
import shutil
import sys
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = shutil.get_terminal_size()
__SCREAMING_SNAKE_CASE = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class lowerCAmelCase__ ( enum.Enum ):
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = 1
def __a ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict="" ):
sys.stdout.write(str(lowerCAmelCase__ ) + end )
sys.stdout.flush()
def __a ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : int="" ):
forceWrite(F'\u001b[{color}m{content}\u001b[0m' , lowerCAmelCase__ )
def __a ( ):
forceWrite('''\r''' )
def __a ( lowerCAmelCase__ : int , lowerCAmelCase__ : str ):
forceWrite(F'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def __a ( ):
forceWrite(''' ''' * TERMINAL_WIDTH )
reset_cursor()
def __a ( ):
reset_cursor()
forceWrite('''-''' * TERMINAL_WIDTH )
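# These helpers drive the terminal with ANSI escape sequences: "\u001b[{color}m"
# enables a color and "\u001b[0m" resets it, "\r" returns to column 0, and
# "\033[{n}A".."\033[{n}D" move the cursor n cells (see CURSOR_TO_CHAR above).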
| 688 | 1 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = "umt5"
__UpperCamelCase = ["past_key_values"]
def __init__( self : List[Any] , A__ : Any=2_5_0_1_1_2 , A__ : Union[str, Any]=5_1_2 , A__ : Dict=6_4 , A__ : List[Any]=1_0_2_4 , A__ : Dict=8 , A__ : str=None , A__ : str=6 , A__ : Any=3_2 , A__ : Tuple=1_2_8 , A__ : Any=0.1 , A__ : List[str]=1E-6 , A__ : int=1.0 , A__ : List[str]="gated-gelu" , A__ : Union[str, Any]=True , A__ : List[str]=True , A__ : Optional[int]="T5Tokenizer" , A__ : Tuple=True , A__ : int=0 , A__ : Union[str, Any]=1 , A__ : Union[str, Any]=0 , **A__ : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(
is_encoder_decoder=A__ , tokenizer_class=A__ , tie_word_embeddings=A__ , pad_token_id=A__ , eos_token_id=A__ , decoder_start_token_id=A__ , **A__ , )
a__ : List[Any] = vocab_size
a__ : List[Any] = d_model
a__ : Any = d_kv
a__ : str = d_ff
a__ : List[str] = num_layers
a__ : str = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
a__ : Optional[Any] = num_heads
a__ : Any = relative_attention_num_buckets
a__ : List[Any] = relative_attention_max_distance
a__ : Optional[Any] = dropout_rate
a__ : str = layer_norm_epsilon
a__ : Any = initializer_factor
a__ : Optional[Any] = feed_forward_proj
a__ : Optional[int] = use_cache
a__ : str = self.feed_forward_proj.split('''-''' )
a__ : List[Any] = act_info[-1]
a__ : Union[str, Any] = act_info[0] == '''gated'''
if len(A__ ) > 1 and act_info[0] != "gated" or len(A__ ) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
if feed_forward_proj == "gated-gelu":
a__ : Any = '''gelu_new'''
@property
def __lowerCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
return self.d_model
@property
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
return self.num_heads
@property
def __lowerCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
return self.num_layers
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __lowerCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
a__ : Union[str, Any] = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
a__ : Union[str, Any] = '''past_encoder_sequence + sequence'''
a__ : Optional[Any] = {0: '''batch'''}
a__ : List[str] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
a__ : str = {0: '''batch''', 1: '''decoder_sequence'''}
a__ : List[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(A__ , direction='''inputs''' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
'''simple docstring'''
return 1_3
@property
def __lowerCAmelCase ( self : Union[str, Any] ) -> float:
'''simple docstring'''
return 5E-4
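# A minimal usage sketch (hypothetical values; the class above corresponds to
# UMT5Config in the public transformers API):
#   config = UMT5Config(d_model=5_1_2, num_layers=8, num_heads=6)
# The three properties defined above then alias d_model, num_heads and num_layers,
# presumably under the usual hidden_size / num_attention_heads / num_hidden_layers names.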
| 688 |
'''simple docstring'''
import inspect
import unittest
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def __lowerCAmelCase ( self : int ) -> str:
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
a__ : Optional[int] = inspect.getmembers(A__ , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
a__ : int = '''k-diffusion'''
elif backend == "invisible_watermark":
a__ : int = '''invisible-watermark'''
assert backend in deps, F'{backend} is not in the deps table!'
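# The "dummy_" modules hold placeholder classes for optional backends; each class
# lists its required backends in _backends, and the two remappings above cover
# packages whose PyPI names (k-diffusion, invisible-watermark) differ from their
# import names.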
| 688 | 1 |
'''simple docstring'''
from __future__ import annotations
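# Shear stress relates a tangential force F applied over an area A by
# stress = F / A; given exactly two of (stress, tangential_force, area), the
# function below solves for the missing third quantity.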
def __a ( lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float , ):
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
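# Minimal usage sketch for shear_stress above: pass exactly one of the three values
# as 0 and the function solves for it (expected outputs shown as comments).
print(shear_stress(stress=25, tangential_force=100, area=0))     # ('area', 4.0)
print(shear_stress(stress=0, tangential_force=1600, area=200))   # ('stress', 8.0)
print(shear_stress(stress=1000, tangential_force=0, area=1200))  # ('tangential_force', 1200000)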
| 688 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
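# Hedged sanity check for preprocess above: spatial dims are floored to multiples of 32
# and pixel values land in [-1.0, 1.0]; the 70x45 black test image is illustrative.
from PIL import Image as _PILImage

_img = _PILImage.new("RGB", (70, 45))
_t = preprocess(_img)
assert _t.shape == (1, 3, 32, 64)             # 70 -> 64, 45 -> 32
assert _t.min() == -1.0 and _t.max() == -1.0  # black image maps to -1.0 everywhere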
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        '''simple docstring'''
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 688 | 1 |
'''simple docstring'''
def palindromic_string(input_string: str) -> str:
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string end after the previously explored end (that is r)?
        # if yes, update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
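# Hedged usage sketch for the Manacher implementation above
# (the function name follows the reconstruction, see the definition).
print(palindromic_string("abababa"))        # abababa
print(palindromic_string("forgeeksskeeg"))  # geeksskeeg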
| 688 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
__SCREAMING_SNAKE_CASE = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
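# Quick worked example for downscale_height_and_width with the default scale_factor=8:
# sizes are rounded up to the next multiple of 8**2 = 64.
print(downscale_height_and_width(512, 512))  # (512, 512) - already a multiple of 64
print(downscale_height_and_width(500, 500))  # (512, 512) - 500 rounds up to 8 * 64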
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        '''simple docstring'''
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
def __lowerCAmelCase ( self : Union[str, Any] , A__ : int=0 ) -> str:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
a__ : Union[str, Any] = torch.device(F'cuda:{gpu_id}' )
a__ : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A__ , A__ )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : Tuple=0 ) -> Dict:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
a__ : int = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=A__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
a__ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
a__ , a__ : List[str] = cpu_offload_with_hook(A__ , A__ , prev_module_hook=A__ )
# We'll offload the last model manually.
a__ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(A__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A__ )
def __call__( self : Any , A__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A__ : torch.FloatTensor , A__ : int = 5_1_2 , A__ : int = 5_1_2 , A__ : int = 1_0_0 , A__ : float = 4.0 , A__ : int = 1 , A__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A__ : Optional[torch.FloatTensor] = None , A__ : Optional[str] = "pil" , A__ : bool = True , ) -> str:
'''simple docstring'''
a__ : Optional[Any] = self._execution_device
a__ : List[str] = guidance_scale > 1.0
if isinstance(A__ , A__ ):
a__ : int = torch.cat(A__ , dim=0 )
if isinstance(A__ , A__ ):
a__ : Optional[int] = torch.cat(A__ , dim=0 )
if isinstance(A__ , A__ ):
a__ : int = torch.cat(A__ , dim=0 )
a__ : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
a__ : Tuple = image_embeds.repeat_interleave(A__ , dim=0 )
a__ : Optional[int] = negative_image_embeds.repeat_interleave(A__ , dim=0 )
a__ : Optional[int] = hint.repeat_interleave(A__ , dim=0 )
a__ : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A__ )
a__ : Tuple = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=A__ )
self.scheduler.set_timesteps(A__ , device=A__ )
a__ : int = self.scheduler.timesteps
a__ : str = self.movq.config.latent_channels
a__ , a__ : Optional[int] = downscale_height_and_width(A__ , A__ , self.movq_scale_factor )
# create initial latent
a__ : List[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A__ , A__ , A__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A__ ) ):
# expand the latents if we are doing classifier free guidance
a__ : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a__ : List[str] = {'''image_embeds''': image_embeds, '''hint''': hint}
a__ : Union[str, Any] = self.unet(
sample=A__ , timestep=A__ , encoder_hidden_states=A__ , added_cond_kwargs=A__ , return_dict=A__ , )[0]
if do_classifier_free_guidance:
a__ , a__ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
a__ , a__ : Dict = noise_pred.chunk(2 )
a__ , a__ : Optional[Any] = variance_pred.chunk(2 )
a__ : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
a__ : Union[str, Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
a__ , a__ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
a__ : Union[str, Any] = self.scheduler.step(
A__ , A__ , A__ , generator=A__ , )[0]
# post-processing
a__ : Tuple = self.movq.decode(A__ , force_not_quantize=A__ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
a__ : Union[str, Any] = image * 0.5 + 0.5
a__ : str = image.clamp(0 , 1 )
a__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
a__ : int = self.numpy_to_pil(A__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A__ )
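# The classifier-free guidance combination used in the denoising loop above, in isolation.
# Toy tensors only; guidance_scale = 4.0 is just this sketch's choice.
_uncond = torch.zeros(1, 4, 8, 8)
_text = torch.ones(1, 4, 8, 8)
_guided = _uncond + 4.0 * (_text - _uncond)
assert float(_guided.mean()) == 4.0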
| 688 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = 42
__UpperCamelCase = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = 42
__UpperCamelCase = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 688 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
        'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/esm2_t6_8M_UR50D': 1024,
    'facebook/esm2_t12_35M_UR50D': 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
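# Hedged usage sketch for load_vocab_file above: the vocab format is one token per line.
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as _f:
    _f.write("<cls>\n<pad>\nA\n")
assert load_vocab_file(_f.name) == ["<cls>", "<pad>", "A"]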
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
def __lowerCAmelCase ( self : Any , A__ : int ) -> str:
'''simple docstring'''
return self._id_to_token.get(A__ , self.unk_token )
def __lowerCAmelCase ( self : Optional[Any] , A__ : str ) -> int:
'''simple docstring'''
return self._token_to_id.get(A__ , self._token_to_id.get(self.unk_token ) )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : Tuple , **A__ : str ) -> List[Any]:
'''simple docstring'''
return text.split()
def __lowerCAmelCase ( self : Union[str, Any] , A__ : Optional[int]=False ) -> Tuple:
'''simple docstring'''
return len(self._id_to_token )
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def __lowerCAmelCase ( self : Any , A__ : str ) -> int:
'''simple docstring'''
return self._token_to_id.get(A__ , self._token_to_id.get(self.unk_token ) )
def __lowerCAmelCase ( self : List[Any] , A__ : int ) -> str:
'''simple docstring'''
return self._id_to_token.get(A__ , self.unk_token )
def __lowerCAmelCase ( self : str , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : Tuple = [self.cls_token_id]
a__ : Union[str, Any] = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __lowerCAmelCase ( self : Tuple , A__ : List , A__ : Optional[List] = None , A__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
a__ : Any = [1] + ([0] * len(A__ )) + [1]
if token_ids_a is not None:
mask += [0] * len(A__ ) + [1]
return mask
def __lowerCAmelCase ( self : Any , A__ : Dict , A__ : Dict ) -> List[Any]:
'''simple docstring'''
a__ : Union[str, Any] = os.path.join(A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(A__ , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __lowerCAmelCase ( self : Any ) -> int:
'''simple docstring'''
return self.get_vocab_size(with_added_tokens=A__ )
def __lowerCAmelCase ( self : List[str] , A__ : Union[List[str], List[AddedToken]] , A__ : bool = False ) -> int:
'''simple docstring'''
return super()._add_tokens(A__ , special_tokens=A__ )
| 688 | 1 |
'''simple docstring'''
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
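# A few hedged sanity checks for the validator above (note the 254 cap on each octet):
assert is_ip_v4_address_valid("192.168.0.23")
assert not is_ip_v4_address_valid("192.168.256.1")  # octet above the allowed range
assert not is_ip_v4_address_valid("not.an.ip.addr")  # non-digit parts are filtered out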
| 688 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : str ) -> Dict:
'''simple docstring'''
a__ : List[str] = False
def __lowerCAmelCase ( self : Tuple , A__ : Optional[int] , A__ : Optional[Any] , A__ : List[str] , A__ : Tuple ) -> Optional[int]:
'''simple docstring'''
if not self.initialized:
a__ : Optional[Any] = RagRetriever(
A__ , question_encoder_tokenizer=A__ , generator_tokenizer=A__ , index=A__ , init_retrieval=A__ , )
a__ : Union[str, Any] = True
def __lowerCAmelCase ( self : Tuple ) -> Tuple:
'''simple docstring'''
self.retriever.index.init_index()
def __lowerCAmelCase ( self : List[Any] , A__ : List[Any] , A__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
a__ , a__ : Optional[Any] = self.retriever._main_retrieve(A__ , A__ )
return doc_ids, retrieved_doc_embeds
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : str , A__ : Optional[int] , A__ : List[Any] , A__ : List[Any] , A__ : str , A__ : Any=None ) -> Optional[Any]:
'''simple docstring'''
if index is not None and index.is_initialized() and len(A__ ) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )
super().__init__(
A__ , question_encoder_tokenizer=A__ , generator_tokenizer=A__ , index=A__ , init_retrieval=A__ , )
a__ : List[str] = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(A__ , A__ , A__ , A__ )
for worker in self.retrieval_workers
] )
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
logger.info('''initializing retrieval''' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __lowerCAmelCase ( self : Optional[int] , A__ : Optional[int] , A__ : int ) -> Dict:
'''simple docstring'''
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
a__ : List[Any] = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
a__ , a__ : Tuple = ray.get(random_worker.retrieve.remote(A__ , A__ ) )
else:
a__ , a__ : int = self._main_retrieve(A__ , A__ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A__ )
@classmethod
def __lowerCAmelCase ( cls : int , A__ : Optional[Any] , A__ : Any=None , **A__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return super(A__ , cls ).get_tokenizers(A__ , A__ , **A__ )
@classmethod
def __lowerCAmelCase ( cls : int , A__ : Optional[int] , A__ : Union[str, Any] , A__ : Union[str, Any]=None , **A__ : Dict ) -> List[Any]:
'''simple docstring'''
a__ : Dict = kwargs.pop('''config''' , A__ ) or RagConfig.from_pretrained(A__ , **A__ )
a__ : Dict = RagTokenizer.from_pretrained(A__ , config=A__ )
a__ : str = rag_tokenizer.question_encoder
a__ : List[str] = rag_tokenizer.generator
if indexed_dataset is not None:
a__ : List[Any] = '''custom'''
a__ : List[Any] = CustomHFIndex(config.retrieval_vector_size , A__ )
else:
a__ : Optional[Any] = cls._build_index(A__ )
return cls(
A__ , question_encoder_tokenizer=A__ , generator_tokenizer=A__ , retrieval_workers=A__ , index=A__ , )
| 688 | 1 |
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
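# Hedged usage sketch; the function name follows the reconstruction above.
# The sample points lie on y = x + 5, so interpolating at x0 = 5 gives 10.0.
_value, _table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
print(_value)  # 10.0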
| 688 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = ["pixel_values"]
def __init__( self : Dict , A__ : bool = True , A__ : Dict[str, int] = None , A__ : PILImageResampling = PILImageResampling.BICUBIC , A__ : bool = True , A__ : Union[int, float] = 1 / 2_5_5 , A__ : bool = True , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : bool = True , **A__ : Union[str, Any] , ) -> None:
'''simple docstring'''
super().__init__(**A__ )
a__ : Union[str, Any] = size if size is not None else {'''height''': 3_8_4, '''width''': 3_8_4}
a__ : Dict = get_size_dict(A__ , default_to_square=A__ )
a__ : Tuple = do_resize
a__ : int = size
a__ : Dict = resample
a__ : Optional[Any] = do_rescale
a__ : Any = rescale_factor
a__ : int = do_normalize
a__ : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
a__ : str = image_std if image_std is not None else OPENAI_CLIP_STD
a__ : List[str] = do_convert_rgb
def __lowerCAmelCase ( self : int , A__ : np.ndarray , A__ : Dict[str, int] , A__ : PILImageResampling = PILImageResampling.BICUBIC , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[Any] , ) -> np.ndarray:
'''simple docstring'''
a__ : Tuple = get_size_dict(A__ , default_to_square=A__ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
a__ : Dict = (size['''height'''], size['''width'''])
return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ )
def __lowerCAmelCase ( self : Tuple , A__ : np.ndarray , A__ : Union[int, float] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Any , ) -> Any:
'''simple docstring'''
return rescale(A__ , scale=A__ , data_format=A__ , **A__ )
def __lowerCAmelCase ( self : Optional[Any] , A__ : np.ndarray , A__ : Union[float, List[float]] , A__ : Union[float, List[float]] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[str] , ) -> np.ndarray:
'''simple docstring'''
return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ )
def __lowerCAmelCase ( self : List[Any] , A__ : ImageInput , A__ : Optional[bool] = None , A__ : Optional[Dict[str, int]] = None , A__ : PILImageResampling = None , A__ : Optional[bool] = None , A__ : Optional[float] = None , A__ : Optional[bool] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[str, TensorType]] = None , A__ : bool = None , A__ : ChannelDimension = ChannelDimension.FIRST , **A__ : int , ) -> PIL.Image.Image:
'''simple docstring'''
a__ : Any = do_resize if do_resize is not None else self.do_resize
a__ : Dict = resample if resample is not None else self.resample
a__ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
a__ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
a__ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
a__ : List[Any] = image_mean if image_mean is not None else self.image_mean
a__ : Tuple = image_std if image_std is not None else self.image_std
a__ : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
a__ : Union[str, Any] = size if size is not None else self.size
a__ : Dict = get_size_dict(A__ , default_to_square=A__ )
a__ : Tuple = make_list_of_images(A__ )
if not valid_images(A__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
a__ : Union[str, Any] = [convert_to_rgb(A__ ) for image in images]
# All transformations expect numpy arrays.
a__ : List[Any] = [to_numpy_array(A__ ) for image in images]
if do_resize:
a__ : Any = [self.resize(image=A__ , size=A__ , resample=A__ ) for image in images]
if do_rescale:
a__ : Any = [self.rescale(image=A__ , scale=A__ ) for image in images]
if do_normalize:
a__ : Union[str, Any] = [self.normalize(image=A__ , mean=A__ , std=A__ ) for image in images]
a__ : Dict = [to_channel_dimension_format(A__ , A__ ) for image in images]
a__ : Tuple = BatchFeature(data={'''pixel_values''': images} , tensor_type=A__ )
return encoded_outputs
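# The per-image rescale + normalize math used in preprocess above, on a toy array.
# The 0.5 mean/std values are stand-ins, not the actual OPENAI_CLIP_MEAN/STD constants.
_img = np.full((2, 2, 3), 255, dtype=np.uint8)
_rescaled = _img * (1 / 255)
_mean, _std = np.array([0.5] * 3), np.array([0.5] * 3)
_normalized = (_rescaled - _mean) / _std
assert (_normalized == 1.0).all()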
| 688 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = "swin2sr"
__UpperCamelCase = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__(self, image_size=64, patch_size=1, num_channels=3, embed_dim=180, depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, upscale=2, img_range=1.0, resi_connection="1conv", upsampler="pixelshuffle", **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 688 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
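# The refinenet index remap applied in rename_key above, shown in isolation:
# fusion layers are stored in reverse order relative to the original checkpoint.
for _layer_idx in (1, 2, 3, 4):
    print(f"refinenet{_layer_idx} -> fusion_stage.layers.{abs(_layer_idx - 4)}")
# refinenet1 -> fusion_stage.layers.3 ... refinenet4 -> fusion_stage.layers.0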
| 688 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
a__ : int = 0
def __lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : List[Any] = Path(A__ ) / '''preprocessor_config.json'''
a__ : List[Any] = Path(A__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Any = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : int = Path(A__ ) / '''preprocessor_config.json'''
a__ : Optional[Any] = Path(A__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Dict = CLIPConfig()
# Create a dummy config file with image_proceesor_type
a__ : int = Path(A__ ) / '''preprocessor_config.json'''
a__ : Optional[int] = Path(A__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
a__ : List[Any] = AutoImageProcessor.from_pretrained(A__ ).to_dict()
config_dict.pop('''image_processor_type''' )
a__ : Union[str, Any] = CLIPImageProcessor(**A__ )
# save in new folder
model_config.save_pretrained(A__ )
config.save_pretrained(A__ )
a__ : Union[str, Any] = AutoImageProcessor.from_pretrained(A__ )
# make sure private variable is not incorrectly saved
a__ : Optional[Any] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Optional[int] = Path(A__ ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
a__ : Any = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
A__ , '''clip-base is not a local folder and is not a valid model identifier''' ):
a__ : str = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
'''simple docstring'''
with self.assertRaisesRegex(
A__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ , revision='''aaaaaa''' )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
A__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
a__ : Union[str, Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
with self.assertRaises(A__ ):
a__ : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A__ ):
a__ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
a__ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A__ )
a__ : str = AutoImageProcessor.from_pretrained(A__ , trust_remote_code=A__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , A__ )
AutoImageProcessor.register(A__ , A__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A__ ):
AutoImageProcessor.register(A__ , A__ )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Optional[int] = Path(A__ ) / '''preprocessor_config.json'''
a__ : List[str] = Path(A__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(A__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A__ , '''w''' ) )
a__ : Tuple = CustomImageProcessor.from_pretrained(A__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A__ )
a__ : Tuple = AutoImageProcessor.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
'''simple docstring'''
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = True
try:
AutoConfig.register('''custom''' , A__ )
AutoImageProcessor.register(A__ , A__ )
# If remote code is not set, the default is to use local
a__ : Dict = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
a__ : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
a__ : Optional[int] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(A__ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 688 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_informer'] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 688 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__SCREAMING_SNAKE_CASE = get_logger(__name__)
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCamelCase = "dummy_data"
__UpperCamelCase = "datasets"
__UpperCamelCase = False
def __init__( self : Any , A__ : str , A__ : str , A__ : Union[Version, str] , A__ : Optional[str] = None , A__ : bool = False , A__ : bool = True , A__ : Optional[List[Callable]] = None , ) -> int:
'''simple docstring'''
a__ : Tuple = 0
a__ : Any = dataset_name
a__ : int = cache_dir
a__ : str = use_local_dummy_data
a__ : List[str] = config
# download_callbacks take a single url as input
a__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
a__ : str = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
a__ : Optional[Any] = str(A__ )
# to be downloaded
a__ : Tuple = None
a__ : Tuple = None
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if self._dummy_file is None:
a__ : Dict = self.download_dummy_data()
return self._dummy_file
@property
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
a__ : int = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
a__ : str = cached_path(
A__ , cache_dir=self.cache_dir , extract_compressed_file=A__ , force_extract=A__ )
return os.path.join(A__ , self.dummy_file_name )
@property
def __lowerCAmelCase ( self : int ) -> Optional[int]:
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self._bucket_url is None:
a__ : int = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : Optional[int] , *A__ : int ) -> Union[str, Any]:
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
a__ : Tuple = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
a__ : Union[str, Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(A__ , A__ ):
return self.create_dummy_data_dict(A__ , A__ )
elif isinstance(A__ , (list, tuple) ):
return self.create_dummy_data_list(A__ , A__ )
else:
return self.create_dummy_data_single(A__ , A__ )
def __lowerCAmelCase ( self : List[str] , A__ : Any , *A__ : int ) -> Any:
'''simple docstring'''
return self.download_and_extract(A__ )
def __lowerCAmelCase ( self : Any , A__ : Optional[int] , A__ : Optional[Any] ) -> int:
'''simple docstring'''
return self.download_and_extract(A__ )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : int , *A__ : List[Any] , **A__ : str ) -> Optional[Any]:
'''simple docstring'''
return path
def __lowerCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
return {}
def __lowerCAmelCase ( self : int , A__ : Union[str, Any] , A__ : List[str] ) -> Any:
'''simple docstring'''
a__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(A__ , A__ ):
for single_url in single_urls:
download_callback(A__ )
else:
a__ : Dict = single_urls
download_callback(A__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(A__ , A__ ):
a__ : Optional[int] = [os.path.join(A__ , urllib.parse.quote_plus(Path(A__ ).name ) ) for x in single_urls]
else:
a__ : Optional[Any] = single_urls
a__ : Tuple = os.path.join(A__ , urllib.parse.quote_plus(Path(A__ ).name ) )
a__ : List[str] = value
# make sure that values are unique
if all(isinstance(A__ , A__ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
a__ : Optional[int] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __lowerCAmelCase ( self : Dict , A__ : str , A__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
a__ : str = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
a__ : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , A__ ) ) for url in data_url )
a__ : Optional[Any] = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
a__ : Dict = [data_url[0]] * len(A__ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(A__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
a__ : Optional[int] = os.path.join(A__ , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(A__ )
return dummy_data_list
def __lowerCAmelCase ( self : Dict , A__ : Dict , A__ : str ) -> Optional[int]:
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(A__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
a__ : Union[str, Any] = os.path.join(A__ , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(A__ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __lowerCAmelCase ( self : int ) -> str:
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Any , A__ : Tuple ) -> Any:
'''simple docstring'''
def _iter_archive_members(A__ : str ):
# this preserves the order of the members inside the ZIP archive
a__ : Dict = Path(self.dummy_file ).parent
a__ : Tuple = path.relative_to(A__ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
a__ : Optional[Any] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(A__ )
a__ : str = Path(A__ )
a__ : Optional[Any] = _iter_archive_members(A__ ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(A__ ).as_posix(), file_path.open('''rb''' )
def __lowerCAmelCase ( self : Tuple , A__ : Tuple ) -> Tuple:
'''simple docstring'''
if not isinstance(A__ , A__ ):
a__ : int = [paths]
for path in paths:
if os.path.isfile(A__ ):
if os.path.basename(A__ ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(A__ ):
if os.path.basename(A__ ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(A__ ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(A__ , A__ )
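
# Illustrative sketch (not part of the original class): how a remote URL is mapped
# to a path inside the dummy-data folder, using the same quote_plus rule as above.
# `dummy_root` is a hypothetical folder name chosen for the example.
if __name__ == "__main__":
    dummy_root = "dummy/1.0.0/dummy_data"
    url = "https://example.com/files/train.csv?split=1"
    print(os.path.join(dummy_root, urllib.parse.quote_plus(Path(url).name)))
    # -> dummy/1.0.0/dummy_data/train.csv%3Fsplit%3D1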
| 688 | 1 |
'''simple docstring'''
import unittest
import numpy as np
def schur_complement( mat_a : np.ndarray , mat_b : np.ndarray , mat_c : np.ndarray , pseudo_inv : np.ndarray | None = None , ) -> np.ndarray:
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            '''Expected the same number of rows for A and B. '''
            F'Instead found A of size {shape_a} and B of size {shape_b}'
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            '''Expected the same number of columns for B and C. '''
            F'Instead found B of size {shape_b} and C of size {shape_c}'
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                '''Input matrix A is not invertible. Cannot compute Schur complement.''' )
    return mat_c - mat_b.T @ a_inv @ mat_b
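
# For the block matrix M = [[A, B], [B.T, C]], the Schur complement
# S = C - B.T @ A^{-1} @ B satisfies det(M) = det(A) * det(S); the unit tests
# below verify exactly this identity.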
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Any ) -> None:
'''simple docstring'''
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        x = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(x )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x , det_a * det_s )
def __lowerCAmelCase ( self : Union[str, Any] ) -> None:
'''simple docstring'''
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
def __lowerCAmelCase ( self : Tuple ) -> None:
'''simple docstring'''
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 688 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = LxmertTokenizer
__UpperCamelCase = LxmertTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def __lowerCAmelCase ( self : str ) -> str:
'''simple docstring'''
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self : int , A__ : int ) -> int:
'''simple docstring'''
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
return input_text, output_text
def __lowerCAmelCase ( self : int ) -> Dict:
'''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 1_0, 8, 9] )
def __lowerCAmelCase ( self : Any ) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
| 688 | 1 |
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation='relu')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation='relu'))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
training_set = train_datagen.flow_from_directory(
    'dataset/training_set', target_size=(6_4, 6_4), batch_size=3_2, class_mode='binary'
)
test_set = test_datagen.flow_from_directory(
    'dataset/test_set', target_size=(6_4, 6_4), batch_size=3_2, class_mode='binary'
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save('cnn.h5')
# Part 3 - Making new predictions
test_image = tf.keras.preprocessing.image.load_img(
    'dataset/single_prediction/image.png', target_size=(6_4, 6_4)
)
test_image = tf.keras.preprocessing.image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
result = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
    prediction = 'Normal'
if result[0][0] == 1:
    prediction = 'Abnormality detected'
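
# Note: `classifier.predict` returns a sigmoid probability in [0, 1], so the exact
# 0/1 comparisons above only trigger at saturation. A common alternative (an
# assumption, not the original logic) is to threshold at 0.5:
#   prediction = 'Abnormality detected' if result[0][0] > 0.5 else 'Normal'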
| 688 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task : str , reset_position_index_per_cell : bool , tf_checkpoint_path : str , tapas_config_file : str , pytorch_dump_path : str ):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(F'Task {task} not supported.' )
    print(F'Building PyTorch model from configuration: {config}' )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(F'Save tokenizer files to {pytorch_dump_path}' )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''' , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
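
# Example invocation (illustrative paths; the script file name is an assumption):
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output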
| 688 | 1 |
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 688 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
__SCREAMING_SNAKE_CASE = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/fnet-base': 5_1_2,
'google/fnet-large': 5_1_2,
}
SPIECE_UNDERLINE = '▁'
class lowerCAmelCase__ ( PreTrainedTokenizerFast ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "token_type_ids"]
__UpperCamelCase = FNetTokenizer
def __init__( self : Any , A__ : Any=None , A__ : int=None , A__ : List[str]=False , A__ : int=True , A__ : str=True , A__ : List[Any]="<unk>" , A__ : Dict="[SEP]" , A__ : List[str]="<pad>" , A__ : Union[str, Any]="[CLS]" , A__ : Dict="[MASK]" , **A__ : Tuple , ) -> List[str]:
'''simple docstring'''
a__ : Optional[int] = (
AddedToken(A__ , lstrip=A__ , rstrip=A__ , normalized=A__ )
if isinstance(A__ , A__ )
else mask_token
)
super().__init__(
A__ , tokenizer_file=A__ , do_lower_case=A__ , remove_space=A__ , keep_accents=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , **A__ , )
a__ : Optional[Any] = do_lower_case
a__ : Dict = remove_space
a__ : List[Any] = keep_accents
a__ : Optional[Any] = vocab_file
a__ : Any = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
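
# Layout sketch for the special-token helpers above (illustrative; the ids are fake):
#   build_inputs_with_special_tokens([5, 6])          -> [cls] 5 6 [sep]
#   build_inputs_with_special_tokens([5, 6], [7])     -> [cls] 5 6 [sep] 7 [sep]
#   create_token_type_ids_from_sequences([5, 6], [7]) -> [0, 0, 0, 0, 1, 1]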
| 688 | 1 |
'''simple docstring'''
from statistics import mean, stdev
def normalization( data : list , ndigits : int = 3 ) -> list:
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def standardization( data : list , ndigits : int = 3 ) -> list:
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
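
if __name__ == "__main__":
    # Quick sanity check of both helpers (values rounded to 3 digits by default).
    print(normalization([2, 7, 10, 20, 30, 50]))    # min-max scaled into [0, 1]
    print(standardization([2, 7, 10, 20, 30, 50]))  # zero mean, unit (sample) stdev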
| 688 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'distilbert-base-uncased': 5_1_2,
'distilbert-base-uncased-distilled-squad': 5_1_2,
'distilbert-base-cased': 5_1_2,
'distilbert-base-cased-distilled-squad': 5_1_2,
'distilbert-base-german-cased': 5_1_2,
'distilbert-base-multilingual-cased': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class lowerCAmelCase__ ( PreTrainedTokenizerFast ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = ["input_ids", "attention_mask"]
__UpperCamelCase = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
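
# Minimal usage sketch (assumes network access to the public checkpoint; the class
# above is the fast DistilBERT tokenizer, upstream name `DistilBertTokenizerFast`):
#   tok = lowerCAmelCase__.from_pretrained('distilbert-base-uncased')
#   enc = tok('Hello world')
#   print(enc['input_ids'], enc['attention_mask'])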
| 688 | 1 |
'''simple docstring'''
def __a ( num : int ) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
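
# Illustrative checks (the obfuscated name `__a` is kept from the source):
#   __a(121)  -> True
#   __a(-121) -> False  (negatives are rejected outright)
#   __a(10)   -> False  (reversed 10 is 1, not 10)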
if __name__ == "__main__":
import doctest
doctest.testmod()
| 688 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class lowerCAmelCase__ :
"""simple docstring"""
    def __init__( self , pos_x : int , pos_y : int , goal_x : int , goal_y : int , g_cost : int , parent : Node | None , ) -> None:
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic( self ) -> float:
        '''simple docstring'''
        dy = self.pos_x - self.goal_x
        dx = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )
    def __lt__( self , other : Node ) -> bool:
        '''simple docstring'''
        return self.f_cost < other.f_cost
class lowerCAmelCase__ :
"""simple docstring"""
    def __init__( self , start : TPosition , goal : TPosition ) -> None:
        '''simple docstring'''
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , None )
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False
    def search( self ) -> list[TPosition]:
        '''simple docstring'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        return [self.start.pos]
    def get_successors( self , parent : Node ) -> list[Node]:
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path( self , node : Node | None ) -> list[TPosition]:
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class lowerCAmelCase__ :
"""simple docstring"""
    def __init__( self , start : TPosition , goal : TPosition ) -> None:
        '''simple docstring'''
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False
    def search( self ) -> list[TPosition]:
        '''simple docstring'''
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )
        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path( self , fwd_node : Node , bwd_node : Node ) -> list[TPosition]:
        '''simple docstring'''
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
init = (0, 0)
goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
start_time = time.time()
a_star = AStar(init, goal)
path = a_star.search()
end_time = time.time() - start_time
print(f'AStar execution time = {end_time:f} seconds')
bd_start_time = time.time()
bidir_astar = BidirectionalAStar(init, goal)
bd_end_time = time.time() - bd_start_time
print(f'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 688 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
a__ : Any = tempfile.mkdtemp()
# fmt: off
a__ : str = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
a__ : Optional[Any] = dict(zip(A__ , range(len(A__ ) ) ) )
a__ : str = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
a__ : Optional[Any] = {'''unk_token''': '''<unk>'''}
a__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A__ ) )
a__ : Tuple = {
'''do_resize''': True,
'''size''': 2_0,
'''do_center_crop''': True,
'''crop_size''': 1_8,
'''do_normalize''': True,
'''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
a__ : Optional[int] = os.path.join(self.tmpdirname , A__ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A__ , A__ )
def __lowerCAmelCase ( self : Any , **A__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **A__ )
def __lowerCAmelCase ( self : Union[str, Any] , **A__ : str ) -> List[str]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **A__ )
def __lowerCAmelCase ( self : Tuple , **A__ : Tuple ) -> List[Any]:
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **A__ )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
a__ : Tuple = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
a__ : Union[str, Any] = [Image.fromarray(np.moveaxis(A__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
'''simple docstring'''
a__ : Optional[Any] = self.get_tokenizer()
a__ : List[str] = self.get_rust_tokenizer()
a__ : str = self.get_image_processor()
a__ : Optional[int] = OwlViTProcessor(tokenizer=A__ , image_processor=A__ )
processor_slow.save_pretrained(self.tmpdirname )
a__ : Any = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=A__ )
a__ : List[str] = OwlViTProcessor(tokenizer=A__ , image_processor=A__ )
processor_fast.save_pretrained(self.tmpdirname )
a__ : List[str] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A__ )
self.assertIsInstance(processor_fast.tokenizer , A__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A__ )
self.assertIsInstance(processor_fast.image_processor , A__ )
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
'''simple docstring'''
a__ : Optional[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a__ : Union[str, Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
a__ : List[str] = self.get_image_processor(do_normalize=A__ )
a__ : List[Any] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A__ )
def __lowerCAmelCase ( self : Tuple ) -> Any:
'''simple docstring'''
a__ : Dict = self.get_image_processor()
a__ : int = self.get_tokenizer()
a__ : Union[str, Any] = OwlViTProcessor(tokenizer=A__ , image_processor=A__ )
a__ : Optional[Any] = self.prepare_image_inputs()
a__ : Optional[int] = image_processor(A__ , return_tensors='''np''' )
a__ : Optional[int] = processor(images=A__ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
'''simple docstring'''
a__ : str = self.get_image_processor()
a__ : Union[str, Any] = self.get_tokenizer()
a__ : Optional[Any] = OwlViTProcessor(tokenizer=A__ , image_processor=A__ )
a__ : Tuple = '''lower newer'''
a__ : Optional[Any] = processor(text=A__ , return_tensors='''np''' )
a__ : Union[str, Any] = tokenizer(A__ , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def __lowerCAmelCase ( self : Any ) -> Dict:
'''simple docstring'''
a__ : Tuple = self.get_image_processor()
a__ : str = self.get_tokenizer()
a__ : Union[str, Any] = OwlViTProcessor(tokenizer=A__ , image_processor=A__ )
a__ : Tuple = '''lower newer'''
a__ : str = self.prepare_image_inputs()
a__ : Any = processor(text=A__ , images=A__ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A__ ):
processor()
def __lowerCAmelCase ( self : int ) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] = '''google/owlvit-base-patch32'''
a__ : List[Any] = OwlViTProcessor.from_pretrained(A__ )
a__ : List[str] = ['''cat''', '''nasa badge''']
a__ : Tuple = processor(text=A__ )
a__ : Dict = 1_6
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(A__ ):
processor()
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[Any] = '''google/owlvit-base-patch32'''
a__ : List[Any] = OwlViTProcessor.from_pretrained(A__ )
a__ : str = [['''cat''', '''nasa badge'''], ['''person''']]
a__ : Tuple = processor(text=A__ )
a__ : Any = 1_6
a__ : Tuple = len(A__ )
a__ : str = max([len(A__ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(A__ ):
processor()
def __lowerCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
a__ : str = '''google/owlvit-base-patch32'''
a__ : List[str] = OwlViTProcessor.from_pretrained(A__ )
a__ : str = ['''cat''', '''nasa badge''']
a__ : Any = processor(text=A__ )
a__ : Any = 1_6
a__ : List[str] = inputs['''input_ids''']
a__ : List[str] = [
[4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
a__ : List[str] = self.get_image_processor()
a__ : Tuple = self.get_tokenizer()
a__ : Any = OwlViTProcessor(tokenizer=A__ , image_processor=A__ )
a__ : Tuple = self.prepare_image_inputs()
a__ : Optional[int] = self.prepare_image_inputs()
a__ : int = processor(images=A__ , query_images=A__ )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A__ ):
processor()
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
a__ : Optional[int] = self.get_image_processor()
a__ : Any = self.get_tokenizer()
a__ : int = OwlViTProcessor(tokenizer=A__ , image_processor=A__ )
a__ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ : str = processor.batch_decode(A__ )
a__ : Union[str, Any] = tokenizer.batch_decode(A__ )
self.assertListEqual(A__ , A__ )
| 688 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
args = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
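
# Example invocation (illustrative paths; the script file name is an assumption):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /path/to/output \
#       --gpt2_config_file /path/to/config.json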
| 688 | 1 |
'''simple docstring'''
import numpy as np
SQUARE = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class lowerCAmelCase__ :
"""simple docstring"""
    def __init__( self ) -> None:
        '''simple docstring'''
        self.SQUARE = np.array(SQUARE )
    def letter_to_numbers( self , letter : str ) -> np.ndarray:
        '''simple docstring'''
        index1 , index2 = np.where(letter == self.SQUARE )
        indexes = np.concatenate([index1 + 1, index2 + 1] )
        return indexes
    def numbers_to_letter( self , index1 : int , index2 : int ) -> str:
        '''simple docstring'''
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter
    def encode( self , message : str ) -> str:
        '''simple docstring'''
        message = message.lower()
        message = message.replace(''' ''' , '''''' )
        message = message.replace('''j''' , '''i''' )
        first_step = np.empty((2, len(message )) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message ) )
        encoded_message = ''''''
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[numbers_index * 2] )
            index2 = int(second_step[(numbers_index * 2) + 1] )
            letter = self.numbers_to_letter(index1 , index2 )
            encoded_message = encoded_message + letter
        return encoded_message
    def decode( self , message : str ) -> str:
        '''simple docstring'''
        message = message.lower()
        message = message.replace(''' ''' , '''''' )
        first_step = np.empty(2 * len(message ) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message )) )
        decoded_message = ''''''
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[0, numbers_index] )
            index2 = int(second_step[1, numbers_index] )
            letter = self.numbers_to_letter(index1 , index2 )
            decoded_message = decoded_message + letter
        return decoded_message
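
if __name__ == "__main__":
    # Round-trip sanity check (a minimal sketch; the obfuscated class name is kept).
    cipher = lowerCAmelCase__()
    secret = cipher.encode('testmessage')
    print(secret)                 # fractionated (bifid) ciphertext
    print(cipher.decode(secret))  # -> 'testmessage'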
| 688 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
args = parser.parse_args()
config_parameters_to_change = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
key_parameters_to_change = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
subfolder = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
    text = reader.read()
config = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
    model = UNetaDModel(**config)
else:
    class_name = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
    model = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
config = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
            config[value] = config[key]
del config[key]
config['down_block_types'] = [k.replace('UNetRes', '') for k in config['down_block_types']]
config['up_block_types'] = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
    state_dict = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
    new_state_dict = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
        has_changed = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
                new_state_dict['.'.join([new_key] + param_key.split('.')[1:])] = param_value
                has_changed = True
if not has_changed:
            new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 688 | 1 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _readaa( bytestream ):
    dt = numpy.dtype(numpy.uint32 ).newbyteorder('''>''' )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=dt )[0]
@deprecated(None , '''Please use tf.data to implement this functionality.''' )
def _extract_images( f ):
    print('''Extracting''' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _readaa(bytestream )
        if magic != 2051:
            raise ValueError(
                '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
        num_images = _readaa(bytestream )
        rows = _readaa(bytestream )
        cols = _readaa(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
        return data
@deprecated(None , '''Please use tf.one_hot on tensors.''' )
def _dense_to_one_hot( labels_dense , num_classes ):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None , '''Please use tf.data to implement this functionality.''' )
def _extract_labels( f , one_hot=False , num_classes=10 ):
    print('''Extracting''' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _readaa(bytestream )
        if magic != 2049:
            raise ValueError(
                '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
        num_items = _readaa(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels , num_classes )
        return labels
class _DataSet :
"""simple docstring"""
    @deprecated(
        None , '''Please use alternatives such as official/mnist/_DataSet.py'''
        ''' from tensorflow/models.''' , )
    def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ) -> None:
        '''simple docstring'''
        seed1 , seed2 = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2 )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
        if fake_data:
            self._num_examples = 1_0_0_0_0
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), F'images.shape: {images.shape} labels.shape: {labels.shape}'
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 255.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images( self ):
        '''simple docstring'''
        return self._images
    @property
    def labels( self ):
        '''simple docstring'''
        return self._labels
    @property
    def num_examples( self ):
        '''simple docstring'''
        return self._num_examples
    @property
    def epochs_completed( self ):
        '''simple docstring'''
        return self._epochs_completed
    def next_batch( self , batch_size , fake_data=False , shuffle=True ):
        '''simple docstring'''
        if fake_data:
            fake_image = [1] * 7_8_4
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size )],
                [fake_label for _ in range(batch_size )],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perma = numpy.arange(self._num_examples )
            numpy.random.shuffle(perma )
            self._images = self.images[perma]
            self._labels = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples )
                numpy.random.shuffle(perm )
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None , '''Please write your own downloading logic.''' )
def _maybe_download( filename , work_directory , source_url ):
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory , filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url , filepath )  # noqa: S310
    with gfile.GFile(filepath ) as f:
        size = f.size()
    print('''Successfully downloaded''' , filename , size , '''bytes.''' )
    return filepath
@deprecated(
lowerCAmelCase__ , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def __a ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int=False , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : Any=dtypes.floataa , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : str=5000 , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Optional[Any]=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=lowerCAmelCase__ , one_hot=lowerCAmelCase__ , dtype=lowerCAmelCase__ , seed=lowerCAmelCase__ )
a__ : Optional[Any] = fake()
a__ : List[str] = fake()
a__ : int = fake()
return _Datasets(train=lowerCAmelCase__ , validation=lowerCAmelCase__ , test=lowerCAmelCase__ )
if not source_url: # empty string check
a__ : Optional[Any] = DEFAULT_SOURCE_URL
a__ : Optional[int] = '''train-images-idx3-ubyte.gz'''
a__ : int = '''train-labels-idx1-ubyte.gz'''
a__ : Dict = '''t10k-images-idx3-ubyte.gz'''
a__ : Tuple = '''t10k-labels-idx1-ubyte.gz'''
a__ : List[str] = _maybe_download(
lowerCAmelCase__ , lowerCAmelCase__ , source_url + train_images_file )
with gfile.Open(lowerCAmelCase__ , '''rb''' ) as f:
a__ : Dict = _extract_images(lowerCAmelCase__ )
a__ : Dict = _maybe_download(
lowerCAmelCase__ , lowerCAmelCase__ , source_url + train_labels_file )
with gfile.Open(lowerCAmelCase__ , '''rb''' ) as f:
a__ : Optional[Any] = _extract_labels(lowerCAmelCase__ , one_hot=lowerCAmelCase__ )
a__ : Any = _maybe_download(
lowerCAmelCase__ , lowerCAmelCase__ , source_url + test_images_file )
with gfile.Open(lowerCAmelCase__ , '''rb''' ) as f:
a__ : Dict = _extract_images(lowerCAmelCase__ )
a__ : Optional[int] = _maybe_download(
lowerCAmelCase__ , lowerCAmelCase__ , source_url + test_labels_file )
with gfile.Open(lowerCAmelCase__ , '''rb''' ) as f:
a__ : List[Any] = _extract_labels(lowerCAmelCase__ , one_hot=lowerCAmelCase__ )
if not 0 <= validation_size <= len(lowerCAmelCase__ ):
a__ : Any = (
'''Validation size should be between 0 and '''
F'{len(lowerCAmelCase__ )}. Received: {validation_size}.'
)
raise ValueError(lowerCAmelCase__ )
a__ : Any = train_images[:validation_size]
a__ : List[Any] = train_labels[:validation_size]
a__ : Dict = train_images[validation_size:]
a__ : Optional[int] = train_labels[validation_size:]
a__ : List[Any] = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
a__ : Any = _DataSet(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : Tuple = _DataSet(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : Optional[int] = _DataSet(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
return _Datasets(train=lowerCAmelCase__ , validation=lowerCAmelCase__ , test=lowerCAmelCase__ )
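# A minimal usage sketch for the loader defined above (hedged: it assumes the
# masked locals resolve as in the original tensorflow.examples MNIST module
# and that `read_data_sets` is the public name of the function just defined):
#
#   datasets = read_data_sets('/tmp/mnist_data', one_hot=True)
#   images, labels = datasets.train.next_batch(100)
#   # with reshape=True and dtype=float32, images has shape (100, 784)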
| 688 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( SchedulerCommonTest ):
"""simple docstring"""
__UpperCamelCase = (KDPMaDiscreteScheduler,)
__UpperCamelCase = 10
def __lowerCAmelCase ( self : Optional[Any] , **A__ : Optional[int] ) -> int:
'''simple docstring'''
a__ : Optional[int] = {
'''num_train_timesteps''': 1_1_0_0,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**A__ )
return config
def __lowerCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A__ )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=A__ , beta_end=A__ )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A__ )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A__ )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
'''simple docstring'''
a__ : Any = self.scheduler_classes[0]
a__ : str = self.get_scheduler_config(prediction_type='''v_prediction''' )
a__ : Dict = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps )
a__ : Tuple = self.dummy_model()
a__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ : Dict = sample.to(A__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : Optional[Any] = scheduler.scale_model_input(A__ , A__ )
a__ : Union[str, Any] = model(A__ , A__ )
a__ : List[str] = scheduler.step(A__ , A__ , A__ )
a__ : Optional[Any] = output.prev_sample
a__ : Tuple = torch.sum(torch.abs(A__ ) )
a__ : Optional[int] = torch.mean(torch.abs(A__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_4286_5017_0972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_002 ) < 1E-3
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
if torch_device == "mps":
return
a__ : List[Any] = self.scheduler_classes[0]
a__ : Tuple = self.get_scheduler_config()
a__ : Tuple = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps )
a__ : List[Any] = self.dummy_model()
a__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ : Any = sample.to(A__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : str = scheduler.scale_model_input(A__ , A__ )
a__ : List[str] = model(A__ , A__ )
a__ : str = scheduler.step(A__ , A__ , A__ )
a__ : List[Any] = output.prev_sample
a__ : Dict = torch.sum(torch.abs(A__ ) )
a__ : Optional[Any] = torch.mean(torch.abs(A__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
def __lowerCAmelCase ( self : str ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
a__ : Optional[int] = self.scheduler_classes[0]
a__ : Tuple = self.get_scheduler_config()
a__ : List[Any] = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps , device=A__ )
a__ : Union[str, Any] = self.dummy_model()
a__ : List[Any] = self.dummy_sample_deter.to(A__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
a__ : Optional[int] = scheduler.scale_model_input(A__ , A__ )
a__ : List[Any] = model(A__ , A__ )
a__ : Any = scheduler.step(A__ , A__ , A__ )
a__ : List[str] = output.prev_sample
a__ : Any = torch.sum(torch.abs(A__ ) )
a__ : Union[str, Any] = torch.mean(torch.abs(A__ ) )
if str(A__ ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
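# A hedged standalone sketch of the loop these tests exercise (assumption:
# the class follows diffusers' KDPM2DiscreteScheduler API, which the
# obfuscated import name KDPMaDiscreteScheduler appears to alias):
#
#   scheduler = KDPMaDiscreteScheduler(num_train_timesteps=1100)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet call
#       sample = scheduler.step(noise_pred, t, sample).prev_sample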
| 688 | 1 |
'''simple docstring'''
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(f'{solution() = }')
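# Worked check for the routine above (this is the recurrence for the
# convergents of e's continued fraction, as in Project Euler 65): the 10th
# convergent is 1457/536, so solution(10) == 1 + 4 + 5 + 7 == 17.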
| 688 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
a__ : str = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
a__ , a__ : List[Any] = get_aligned_output_features_output_indices(A__ , A__ , A__ )
self.assertEqual(A__ , ['''c'''] )
self.assertEqual(A__ , [2] )
# Out indices set to match out features
a__ , a__ : Optional[int] = get_aligned_output_features_output_indices(['''a''', '''c'''] , A__ , A__ )
self.assertEqual(A__ , ['''a''', '''c'''] )
self.assertEqual(A__ , [0, 2] )
# Out features set to match out indices
a__ , a__ : int = get_aligned_output_features_output_indices(A__ , [0, 2] , A__ )
self.assertEqual(A__ , ['''a''', '''c'''] )
self.assertEqual(A__ , [0, 2] )
# Out features selected from negative indices
a__ , a__ : List[str] = get_aligned_output_features_output_indices(A__ , [-3, -1] , A__ )
self.assertEqual(A__ , ['''a''', '''c'''] )
self.assertEqual(A__ , [-3, -1] )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , A__ )
# Out features must be a list
with self.assertRaises(A__ ):
verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] )
# Out features must be a subset of stage names
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] )
# Out indices must be a list or tuple
with self.assertRaises(A__ ):
verify_out_features_out_indices(A__ , 0 , ['''a''', '''b'''] )
# Out indices must be a subset of stage names
with self.assertRaises(A__ ):
verify_out_features_out_indices(A__ , (0, 1) , ['''a'''] )
# Out features and out indices must be the same length
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] )
# Out features should match out indices
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] )
# Out features and out indices should be in order
with self.assertRaises(A__ ):
verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] )
# Check passes with valid inputs
verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] )
def __lowerCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
a__ : Optional[Any] = BackboneMixin()
a__ : int = ['''a''', '''b''', '''c''']
a__ : List[Any] = ['''a''', '''c''']
a__ : Tuple = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
a__ : Dict = ['''a''', '''b''']
self.assertEqual(backbone.out_features , ['''a''', '''b'''] )
self.assertEqual(backbone.out_indices , [0, 1] )
a__ : int = [-3, -1]
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [-3, -1] )
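# A hedged usage sketch of the helper these tests exercise (assumption: the
# positional signature is (out_features, out_indices, stage_names), as the
# calls above suggest):
#
#   features, indices = get_aligned_output_features_output_indices(
#       None, None, ['a', 'b', 'c']
#   )
#   # -> (['c'], [2]): both default to the last stage when left as None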
| 688 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__SCREAMING_SNAKE_CASE = parser.parse_args()
__SCREAMING_SNAKE_CASE = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
__SCREAMING_SNAKE_CASE = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
__SCREAMING_SNAKE_CASE = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
__SCREAMING_SNAKE_CASE = reader.read()
__SCREAMING_SNAKE_CASE = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
__SCREAMING_SNAKE_CASE = UNetaDModel(**config)
else:
__SCREAMING_SNAKE_CASE = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
__SCREAMING_SNAKE_CASE = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__SCREAMING_SNAKE_CASE = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__SCREAMING_SNAKE_CASE = config[key]
del config[key]
__SCREAMING_SNAKE_CASE = [k.replace('UNetRes', '') for k in config['down_block_types']]
__SCREAMING_SNAKE_CASE = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
__SCREAMING_SNAKE_CASE = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
__SCREAMING_SNAKE_CASE = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
__SCREAMING_SNAKE_CASE = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
__SCREAMING_SNAKE_CASE = param_value
__SCREAMING_SNAKE_CASE = True
if not has_changed:
__SCREAMING_SNAKE_CASE = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
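# Hedged CLI usage for the conversion script above (the script filename and
# both paths are placeholders, not taken from the source):
#
#   python convert_unet_config.py --repo_path ./old-diffusion-repo --dump_path ./converted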
| 688 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def __a ( lowerCAmelCase__ : List[Any] ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def __a ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any ):
a__ : Dict = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
a__ : Any = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' )
a__ : int = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' )
a__ : Optional[Any] = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' )
a__ : Dict = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' )
a__ : List[str] = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' )
a__ : List[Any] = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' )
a__ : str = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' )
a__ : List[Any] = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' )
a__ : List[Any] = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' )
a__ : str = key.replace('''image_encoder.module''' , '''flava.image_model''' )
a__ : Dict = key.replace('''text_encoder.module''' , '''flava.text_model''' )
a__ : List[Any] = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' )
a__ : List[str] = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' )
a__ : List[str] = key.replace('''text_projection''' , '''flava.text_projection''' )
a__ : Any = key.replace('''image_projection''' , '''flava.image_projection''' )
a__ : Any = value.float()
for key, value in codebook_state_dict.items():
a__ : List[str] = value
return upgrade
@torch.no_grad()
def __a ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict=None ):
if config_path is not None:
a__ : Tuple = FlavaConfig.from_pretrained(lowerCAmelCase__ )
else:
a__ : Optional[int] = FlavaConfig()
a__ : List[Any] = FlavaForPreTraining(lowerCAmelCase__ ).eval()
a__ : Optional[int] = convert_dalle_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , save_checkpoint=lowerCAmelCase__ )
if os.path.exists(lowerCAmelCase__ ):
a__ : List[str] = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
else:
a__ : Dict = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location='''cpu''' )
a__ : List[Any] = upgrade_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
hf_model.load_state_dict(lowerCAmelCase__ )
a__ : Any = hf_model.state_dict()
a__ : Optional[Any] = count_parameters(lowerCAmelCase__ )
a__ : int = count_parameters(lowerCAmelCase__ ) + count_parameters(lowerCAmelCase__ )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 )
hf_model.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
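# Hedged CLI usage (the script filename and every path are placeholders):
#
#   python convert_flava_checkpoint.py \
#       --checkpoint_path flava.pt --codebook_path codebook.pt \
#       --pytorch_dump_folder_path ./flava-hf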
| 688 | 1 |