import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that maps a single placeholder token to several learned embedding tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)

        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}. Keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        # Replace each registered placeholder token by its (optionally shuffled) multi-vector tokens.
        if isinstance(text, list):
            return [
                self.replace_placeholder_tokens_in_text(
                    item, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
                )
                for item in text
            ]

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
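# Illustrative usage sketch (not part of the original file): the checkpoint name and the
# placeholder string below are assumptions for demonstration only, and require the
# corresponding tokenizer files to be available.
if __name__ == "__main__":
    tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
    # "<cat-toy>" is expanded to "<cat-toy>_0 ... <cat-toy>_3" before tokenization
    print(tokenizer.replace_placeholder_tokens_in_text("A photo of <cat-toy>"))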
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_a : Tuple = logging.get_logger(__name__)
_a : Optional[int] = ["""model.decoder.embed_positions.weights"""]
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
if "emb" in name:
__lowerCAmelCase = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
__lowerCAmelCase = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
__lowerCAmelCase = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
__lowerCAmelCase = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
__lowerCAmelCase = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
__lowerCAmelCase = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
__lowerCAmelCase = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
__lowerCAmelCase = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
__lowerCAmelCase = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
__lowerCAmelCase = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple[Dict, Dict]:
__lowerCAmelCase = list(state_dict.keys() )
__lowerCAmelCase = {}
for key in keys:
__lowerCAmelCase = state_dict.pop(lowercase )
__lowerCAmelCase = rename_keys(lowercase )
if "in_proj_weight" in key:
# split fused qkv proj
__lowerCAmelCase = val[:hidden_size, :]
__lowerCAmelCase = val[hidden_size : 2 * hidden_size, :]
__lowerCAmelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__lowerCAmelCase = val
else:
__lowerCAmelCase = val
return state_dict, enc_dec_proj_state_dict
def _lowerCAmelCase ( lowercase ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
__lowerCAmelCase = 1024
__lowerCAmelCase = 24
__lowerCAmelCase = 16
elif checkpoint == "medium":
__lowerCAmelCase = 1536
__lowerCAmelCase = 48
__lowerCAmelCase = 24
elif checkpoint == "large":
__lowerCAmelCase = 2048
__lowerCAmelCase = 48
__lowerCAmelCase = 32
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
__lowerCAmelCase = MusicgenDecoderConfig(
hidden_size=lowercase , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase , num_attention_heads=lowercase , )
return config
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase=None , lowercase=None , lowercase="cpu" ) -> Optional[Any]:
__lowerCAmelCase = MusicGen.get_pretrained(lowercase , device=lowercase )
__lowerCAmelCase = decoder_config_from_checkpoint(lowercase )
__lowerCAmelCase = fairseq_model.lm.state_dict()
__lowerCAmelCase , __lowerCAmelCase = rename_state_dict(
lowercase , hidden_size=decoder_config.hidden_size )
__lowerCAmelCase = TaEncoderModel.from_pretrained("""t5-base""" )
__lowerCAmelCase = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
__lowerCAmelCase = MusicgenForCausalLM(lowercase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__lowerCAmelCase , __lowerCAmelCase = decoder.load_state_dict(lowercase , strict=lowercase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase )
if len(lowercase ) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
if len(lowercase ) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
__lowerCAmelCase = MusicgenForConditionalGeneration(text_encoder=lowercase , audio_encoder=lowercase , decoder=lowercase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase )
# check we can do a forward pass
__lowerCAmelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__lowerCAmelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__lowerCAmelCase = model(input_ids=lowercase , decoder_input_ids=lowercase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
__lowerCAmelCase = AutoTokenizer.from_pretrained("""t5-base""" )
__lowerCAmelCase = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
__lowerCAmelCase = MusicgenProcessor(feature_extractor=lowercase , tokenizer=lowercase )
# set the appropriate bos/pad token ids
__lowerCAmelCase = 2048
__lowerCAmelCase = 2048
# set other default generation config params
__lowerCAmelCase = int(30 * audio_encoder.config.frame_rate )
__lowerCAmelCase = True
__lowerCAmelCase = 3.0
if pytorch_dump_folder is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(lowercase )
processor.save_pretrained(lowercase )
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(lowercase )
processor.push_to_hub(lowercase )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
_a : List[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[str] ="""char"""
a : str ="""bpe"""
a : Union[str, Any] ="""wp"""
_a : Tuple = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Optional[int] =["""image_processor""", """char_tokenizer"""]
a : Union[str, Any] ="""ViTImageProcessor"""
a : Optional[int] ="""MgpstrTokenizer"""
def __init__( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""",__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = kwargs.pop("""feature_extractor""" )
__lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
__lowerCAmelCase = tokenizer
__lowerCAmelCase = AutoTokenizer.from_pretrained("""gpt2""" )
__lowerCAmelCase = AutoTokenizer.from_pretrained("""bert-base-uncased""" )
super().__init__(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def __call__( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
__lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
if text is not None:
__lowerCAmelCase = self.char_tokenizer(__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCAmelCase = encodings["""input_ids"""]
return inputs
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = sequences
__lowerCAmelCase = char_preds.size(0 )
__lowerCAmelCase , __lowerCAmelCase = self._decode_helper(__SCREAMING_SNAKE_CASE,"""char""" )
__lowerCAmelCase , __lowerCAmelCase = self._decode_helper(__SCREAMING_SNAKE_CASE,"""bpe""" )
__lowerCAmelCase , __lowerCAmelCase = self._decode_helper(__SCREAMING_SNAKE_CASE,"""wp""" )
__lowerCAmelCase = []
__lowerCAmelCase = []
for i in range(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [char_scores[i], bpe_scores[i], wp_scores[i]]
__lowerCAmelCase = [char_strs[i], bpe_strs[i], wp_strs[i]]
__lowerCAmelCase = scores.index(max(__SCREAMING_SNAKE_CASE ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
__lowerCAmelCase = {}
__lowerCAmelCase = final_strs
__lowerCAmelCase = final_scores
__lowerCAmelCase = char_strs
__lowerCAmelCase = bpe_strs
__lowerCAmelCase = wp_strs
return out
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
__lowerCAmelCase = self.char_decode
__lowerCAmelCase = 1
__lowerCAmelCase = """[s]"""
elif format == DecodeType.BPE:
__lowerCAmelCase = self.bpe_decode
__lowerCAmelCase = 2
__lowerCAmelCase = """#"""
elif format == DecodeType.WORDPIECE:
__lowerCAmelCase = self.wp_decode
__lowerCAmelCase = 1_02
__lowerCAmelCase = """[SEP]"""
else:
raise ValueError(f'Format {format} is not supported.' )
__lowerCAmelCase , __lowerCAmelCase = [], []
__lowerCAmelCase = pred_logits.size(0 )
__lowerCAmelCase = pred_logits.size(1 )
__lowerCAmelCase , __lowerCAmelCase = pred_logits.topk(1,dim=-1,largest=__SCREAMING_SNAKE_CASE,sorted=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = preds_index.view(-1,__SCREAMING_SNAKE_CASE )[:, 1:]
__lowerCAmelCase = decoder(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase = torch.nn.functional.softmax(__SCREAMING_SNAKE_CASE,dim=2 ).max(dim=2 )
__lowerCAmelCase = preds_max_prob[:, 1:]
for index in range(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = preds_str[index].find(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = preds_str[index][:pred_eos]
__lowerCAmelCase = preds_index[index].cpu().tolist()
__lowerCAmelCase = pred_index.index(__SCREAMING_SNAKE_CASE ) if eos_token in pred_index else -1
__lowerCAmelCase = preds_max_prob[index][: pred_eos_index + 1]
__lowerCAmelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__SCREAMING_SNAKE_CASE )
conf_scores.append(__SCREAMING_SNAKE_CASE )
return dec_strs, conf_scores
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = [seq.replace(""" ""","""""" ) for seq in self.char_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )]
return decode_strs
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = [seq.replace(""" ""","""""" ) for seq in self.wp_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )]
return decode_strs
from __future__ import annotations

from collections import deque


def tarjan(g: list[list[int]]) -> list[list[int]]:
    """Return the strongly connected components of a directed graph, using Tarjan's algorithm."""
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    """Build an adjacency list for a directed graph with `n` vertices from (u, v) edge pairs."""
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch information about the authenticated GitHub user via the REST API."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main() -> None:
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    """A pile of elements, ordered by its top (last) item."""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order using patience sort."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)
_a : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
_a : Any = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
for attribute in key.split(""".""" ):
__lowerCAmelCase = getattr(lowercase , lowercase )
if weight_type is not None:
__lowerCAmelCase = getattr(lowercase , lowercase ).shape
else:
__lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
__lowerCAmelCase = value
elif weight_type == "weight_g":
__lowerCAmelCase = value
elif weight_type == "weight_v":
__lowerCAmelCase = value
elif weight_type == "bias":
__lowerCAmelCase = value
else:
__lowerCAmelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]:
__lowerCAmelCase = []
__lowerCAmelCase = fairseq_model.state_dict()
__lowerCAmelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCAmelCase = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key):
# special case since naming is very similar
continue
__lowerCAmelCase = True
if "*" in mapped_key:
__lowerCAmelCase = name.split(lowercase )[0].split(""".""" )[-2]
__lowerCAmelCase = mapped_key.replace("""*""" , lowercase )
if "weight_g" in name:
__lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
__lowerCAmelCase = """weight_v"""
elif "bias" in name:
__lowerCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase = """weight"""
else:
__lowerCAmelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'Unused weights: {unused_weights}' )
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
__lowerCAmelCase = name.split(""".""" )
__lowerCAmelCase = int(items[0] )
__lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Dict:
if config_path is not None:
__lowerCAmelCase = UniSpeechSatConfig.from_pretrained(lowercase )
else:
__lowerCAmelCase = UniSpeechSatConfig()
__lowerCAmelCase = """"""
if is_finetuned:
__lowerCAmelCase = UniSpeechSatForCTC(lowercase )
else:
__lowerCAmelCase = UniSpeechSatForPreTraining(lowercase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
__lowerCAmelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_a : List[str] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_a : Union[str, Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
from __future__ import annotations

END = "#"


class Trie:
    """A character trie supporting prefix-based word lookup."""

    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
def binary_multiply(a: int, b: int) -> int:
    """Multiply two integers using shift-and-add (Russian peasant) multiplication."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Multiply a and b modulo c, keeping every intermediate sum reduced modulo c."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
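# A small sanity check (illustrative, not part of the original file): the shift-and-add result
# matches Python's builtin multiplication, and the modular variant matches (a * b) % c.
if __name__ == "__main__":
    assert binary_multiply(6, 7) == 6 * 7
    assert binary_mod_multiply(6, 7, 5) == (6 * 7) % 5
    print("binary multiplication checks passed")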
from ..utils import DummyObject, requires_backends


class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a : Tuple = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
_a : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : Optional[int] = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[str] ="""gptj"""
a : Optional[int] ={
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self,__SCREAMING_SNAKE_CASE=5_04_00,__SCREAMING_SNAKE_CASE=20_48,__SCREAMING_SNAKE_CASE=40_96,__SCREAMING_SNAKE_CASE=28,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="gelu_new",__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=False,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = vocab_size
__lowerCAmelCase = n_positions
__lowerCAmelCase = n_embd
__lowerCAmelCase = n_layer
__lowerCAmelCase = n_head
__lowerCAmelCase = n_inner
__lowerCAmelCase = rotary_dim
__lowerCAmelCase = activation_function
__lowerCAmelCase = resid_pdrop
__lowerCAmelCase = embd_pdrop
__lowerCAmelCase = attn_pdrop
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_range
__lowerCAmelCase = use_cache
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
super().__init__(
bos_token_id=__SCREAMING_SNAKE_CASE,eos_token_id=__SCREAMING_SNAKE_CASE,tie_word_embeddings=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = "default",__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE,task=__SCREAMING_SNAKE_CASE,patching_specs=__SCREAMING_SNAKE_CASE,use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config,"""pad_token_id""",__SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
__lowerCAmelCase = 0
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE,direction="""inputs""" )
__lowerCAmelCase = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._config.n_layer
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._config.n_head
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,):
'''simple docstring'''
__lowerCAmelCase = super(__SCREAMING_SNAKE_CASE,self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE,batch_size=__SCREAMING_SNAKE_CASE,seq_length=__SCREAMING_SNAKE_CASE,is_pair=__SCREAMING_SNAKE_CASE,framework=__SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
__lowerCAmelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCAmelCase = seqlen + 2
__lowerCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowerCAmelCase = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
__lowerCAmelCase = common_inputs["""attention_mask"""]
if self.use_past:
__lowerCAmelCase = ordered_inputs["""attention_mask"""].dtype
__lowerCAmelCase = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,dtype=__SCREAMING_SNAKE_CASE )],dim=1 )
return ordered_inputs
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 13
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def solution(limit: int = 50000000) -> int:
    """
    Project Euler problem 87: count the numbers below `limit` that can be written as the sum of
    a prime square, a prime cube and a prime fourth power.
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # sieve of Eratosthenes over the odd candidates
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # iterate the inner loops in increasing order so the early-exit `break`s are valid
    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in sorted(primes):
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in sorted(primes):
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f'{solution() = }')
'''simple docstring'''
import os
import sys
import unittest
_a : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_a : Optional[int] = os.path.join(git_repo_path, """src""", """transformers""")
_a : Optional[int] = """
{0} = None
"""
_a : Dict = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
_a : Tuple = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
self.assertIsNone(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = find_backend(""" if not is_tokenizers_available():""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""tokenizers""" )
__lowerCAmelCase = find_backend(""" if not is_tensorflow_text_available():""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""tensorflow_text""" )
__lowerCAmelCase = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""sentencepiece_and_tokenizers""" )
__lowerCAmelCase = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""sentencepiece_and_tensorflow_text""" )
__lowerCAmelCase = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""sentencepiece_and_tokenizers_and_vision""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""",__SCREAMING_SNAKE_CASE )
self.assertIn("""tensorflow_text""",__SCREAMING_SNAKE_CASE )
self.assertIn("""sentencepiece_and_tokenizers""",__SCREAMING_SNAKE_CASE )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""",objects["""torch"""] )
self.assertIn("""TFBertModel""",objects["""tf"""] )
self.assertIn("""FlaxBertModel""",objects["""flax"""] )
self.assertIn("""BertModel""",objects["""torch"""] )
self.assertIn("""TFBertTokenizer""",objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""",objects["""sentencepiece_and_tokenizers"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = create_dummy_object("""CONSTANT""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""\nCONSTANT = None\n""" )
__lowerCAmelCase = create_dummy_object("""function""","""'torch'""" )
self.assertEqual(
__SCREAMING_SNAKE_CASE,"""\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
__lowerCAmelCase = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
"""
__lowerCAmelCase = create_dummy_object("""FakeClass""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
__lowerCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""],__SCREAMING_SNAKE_CASE )
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : Optional[int] =TextToVideoSDPipeline
a : Optional[int] =TEXT_TO_IMAGE_PARAMS
a : Any =TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
a : Union[str, Any] =frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""),up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""),cross_attention_dim=32,attention_head_dim=4,)
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,)
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,sample_size=1_28,)
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,hidden_act="""gelu""",projection_dim=5_12,)
__lowerCAmelCase = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = TextToVideoSDPipeline(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """np"""
__lowerCAmelCase = sd_pipe(**__SCREAMING_SNAKE_CASE ).frames
__lowerCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
__lowerCAmelCase = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",)
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
__lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowerCAmelCase = pipe.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=25,output_type="""pt""" ).frames
__lowerCAmelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
__lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__lowerCAmelCase = pipe.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=2,output_type="""pt""" ).frames
__lowerCAmelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
'''simple docstring'''
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number `n`."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'{solution() = }')
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def apply_forward_hook(method):
    """
    Decorator that, on accelerate >= 0.17.0, runs the module's `_hf_hook.pre_forward` hook (if one
    is attached) before calling `method`. On older accelerate versions, or when accelerate is not
    installed, the method is returned unchanged.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
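# Illustrative sketch of how the decorator above is typically applied (the class and method names
# here are assumptions, not part of this file): decorate a non-`forward` entry point so an attached
# offload hook moves the module to the execution device before it runs.
#
# class MyAutoencoder(torch.nn.Module):
#     @apply_forward_hook
#     def encode(self, x):
#         ...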
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences and join them with newlines, as expected by ROUGE-Lsum scoring."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
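# Hypothetical usage sketch (assumes nltk and its "punkt" data are installed): a two-sentence
# string comes back with one sentence per line.
if __name__ == "__main__":
    print(add_newline_to_end_of_each_sentence("First sentence. Second sentence."))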
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
# load base model
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained(lowercase , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
__lowerCAmelCase = load_file(lowercase )
__lowerCAmelCase = []
# directly update weight in diffusers model
for key in state_dict:
        # it is suggested to print out the key; it will usually look something like
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # the alpha was already applied beforehand, so ".alpha" keys are simply skipped
if ".alpha" in key or key in visited:
continue
if "text" in key:
__lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
__lowerCAmelCase = pipeline.text_encoder
else:
__lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
__lowerCAmelCase = pipeline.unet
# find the target layer
__lowerCAmelCase = layer_infos.pop(0 )
while len(lowercase ) > -1:
try:
__lowerCAmelCase = curr_layer.__getattr__(lowercase )
if len(lowercase ) > 0:
__lowerCAmelCase = layer_infos.pop(0 )
elif len(lowercase ) == 0:
break
except Exception:
if len(lowercase ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
__lowerCAmelCase = layer_infos.pop(0 )
__lowerCAmelCase = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(lowercase )
else:
pair_keys.append(lowercase )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
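        # merge the low-rank update into the frozen weight: W <- W + alpha * (up @ down);
        # 4-D conv weights are squeezed to 2-D for the matmul and the product is unsqueezed back to 4-D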
if len(state_dict[pair_keys[0]].shape ) == 4:
__lowerCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
__lowerCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase ).unsqueeze(2 ).unsqueeze(3 )
else:
__lowerCAmelCase = state_dict[pair_keys[0]].to(torch.floataa )
__lowerCAmelCase = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase )
# update visited list
for item in pair_keys:
visited.append(lowercase )
return pipeline
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
_a : Optional[int] = parser.parse_args()
_a : Dict = args.base_model_path
_a : Optional[Any] = args.checkpoint_path
_a : Union[str, Any] = args.dump_path
_a : Optional[int] = args.lora_prefix_unet
_a : int = args.lora_prefix_text_encoder
_a : str = args.alpha
_a : Any = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
_a : Tuple = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 689 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_a : List[Any] = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : Union[str, Any] =DebertaVaTokenizer
a : Optional[Any] =DebertaVaTokenizerFast
a : Optional[int] =True
a : Optional[int] =True
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE,unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = """this is a test"""
__lowerCAmelCase = """this is a test"""
return input_text, output_text
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """<pad>"""
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],"""<pad>""" )
self.assertEqual(vocab_keys[1],"""<unk>""" )
self.assertEqual(vocab_keys[-1],"""[PAD]""" )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),3_00_01 )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size,3_00_00 )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE,do_lower_case=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE ) )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = DebertaVaTokenizerFast(__SCREAMING_SNAKE_CASE,do_lower_case=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE ) )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE,split_by_punct=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE ) )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = DebertaVaTokenizerFast(__SCREAMING_SNAKE_CASE,split_by_punct=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE ) )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE,do_lower_case=__SCREAMING_SNAKE_CASE,split_by_punct=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE ) )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = DebertaVaTokenizerFast(__SCREAMING_SNAKE_CASE,do_lower_case=__SCREAMING_SNAKE_CASE,split_by_punct=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE ) )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE,do_lower_case=__SCREAMING_SNAKE_CASE,split_by_punct=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE ) )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = DebertaVaTokenizerFast(__SCREAMING_SNAKE_CASE,do_lower_case=__SCREAMING_SNAKE_CASE,split_by_punct=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE ) )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE,do_lower_case=__SCREAMING_SNAKE_CASE,split_by_punct=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE ) )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = DebertaVaTokenizerFast(__SCREAMING_SNAKE_CASE,do_lower_case=__SCREAMING_SNAKE_CASE,split_by_punct=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE ) )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE,do_lower_case=__SCREAMING_SNAKE_CASE,split_by_punct=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE ) )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = DebertaVaTokenizerFast(__SCREAMING_SNAKE_CASE,do_lower_case=__SCREAMING_SNAKE_CASE,split_by_punct=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE ) )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE ) )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """This is a test"""
__lowerCAmelCase = [13, 1, 43_98, 25, 21, 12_89]
__lowerCAmelCase = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE,keep_accents=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = DebertaVaTokenizerFast(__SCREAMING_SNAKE_CASE,keep_accents=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
__lowerCAmelCase = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = DebertaVaTokenizer(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.encode("""sequence builders""" )
__lowerCAmelCase = tokenizer.encode("""multi-sequence build""" )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id],__SCREAMING_SNAKE_CASE )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id],__SCREAMING_SNAKE_CASE,)
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = {"""input_ids""": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE,model_name="""microsoft/deberta-v2-xlarge""",revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""",)
| 689 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
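# A string can be rearranged into a palindrome iff at most one character occurs an odd number
# of times (spaces ignored, case-insensitive); both helpers below check exactly that.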
def _lowerCAmelCase ( lowercase = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def _lowerCAmelCase ( lowercase = "" ) -> bool:
if len(lowercase ) == 0:
return True
__lowerCAmelCase = input_str.replace(""" """ , """""" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__lowerCAmelCase = {}
for character in lower_case_input_str:
__lowerCAmelCase = character_freq_dict.get(lowercase , 0 ) + 1
__lowerCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _lowerCAmelCase ( lowercase = "" ) -> None:
print("""\nFor string = """ , lowercase , """:""" )
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
_a : int = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
_a : Optional[int] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 689 | 1 |
'''simple docstring'''
import numpy as np
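# Hyperbolic tangent written via the logistic form: tanh(x) = 2 / (1 + e^(-2x)) - 1.
# For example, an input of 0.0 maps to 0.0 and 1.0 maps to roughly 0.7616.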
def _lowerCAmelCase ( lowercase ) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 689 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _lowerCAmelCase ( lowercase ) -> List[Any]:
__lowerCAmelCase = VideoMAEConfig()
set_architecture_configs(lowercase , lowercase )
if "finetuned" not in model_name:
__lowerCAmelCase = False
if "finetuned" in model_name:
__lowerCAmelCase = """huggingface/label-files"""
if "kinetics" in model_name:
__lowerCAmelCase = 400
__lowerCAmelCase = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
__lowerCAmelCase = 174
__lowerCAmelCase = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
__lowerCAmelCase = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) )
__lowerCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
def _lowerCAmelCase ( lowercase , lowercase ) -> Any:
if "small" in model_name:
__lowerCAmelCase = 384
__lowerCAmelCase = 1536
__lowerCAmelCase = 12
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 3
__lowerCAmelCase = 192
__lowerCAmelCase = 768
elif "large" in model_name:
__lowerCAmelCase = 1024
__lowerCAmelCase = 4096
__lowerCAmelCase = 24
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 8
__lowerCAmelCase = 512
__lowerCAmelCase = 2048
elif "huge" in model_name:
__lowerCAmelCase = 1280
__lowerCAmelCase = 5120
__lowerCAmelCase = 32
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 8
__lowerCAmelCase = 640
__lowerCAmelCase = 2560
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def _lowerCAmelCase ( lowercase ) -> List[str]:
if "encoder." in name:
__lowerCAmelCase = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
__lowerCAmelCase = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
__lowerCAmelCase = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
__lowerCAmelCase = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowerCAmelCase = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__lowerCAmelCase = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
__lowerCAmelCase = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
__lowerCAmelCase = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
__lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
__lowerCAmelCase = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
__lowerCAmelCase = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
__lowerCAmelCase = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
__lowerCAmelCase = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
__lowerCAmelCase = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__lowerCAmelCase = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__lowerCAmelCase = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
__lowerCAmelCase = name.replace("""head""" , """classifier""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]:
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(lowercase )
if key.startswith("""encoder.""" ):
__lowerCAmelCase = key.replace("""encoder.""" , """""" )
if "qkv" in key:
__lowerCAmelCase = key.split(""".""" )
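            # the original checkpoint stores query/key/value as a single fused "qkv" matrix;
            # it is split into equal thirds for the separate q/k/v projections below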
if key.startswith("""decoder.blocks""" ):
__lowerCAmelCase = config.decoder_hidden_size
__lowerCAmelCase = int(key_split[2] )
__lowerCAmelCase = """decoder.decoder_layers."""
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = config.hidden_size
__lowerCAmelCase = int(key_split[1] )
__lowerCAmelCase = """videomae.encoder.layer."""
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val
return orig_state_dict
def _lowerCAmelCase ( ) -> str:
__lowerCAmelCase = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__lowerCAmelCase = np.load(lowercase )
return list(lowercase )
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = get_videomae_config(lowercase )
if "finetuned" in model_name:
__lowerCAmelCase = VideoMAEForVideoClassification(lowercase )
else:
__lowerCAmelCase = VideoMAEForPreTraining(lowercase )
# download original checkpoint, hosted on Google Drive
__lowerCAmelCase = """pytorch_model.bin"""
gdown.cached_download(lowercase , lowercase , quiet=lowercase )
__lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )
if "model" in files:
__lowerCAmelCase = files["""model"""]
else:
__lowerCAmelCase = files["""module"""]
__lowerCAmelCase = convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
# verify model on basic input
__lowerCAmelCase = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__lowerCAmelCase = prepare_video()
__lowerCAmelCase = image_processor(lowercase , return_tensors="""pt""" )
if "finetuned" not in model_name:
__lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
__lowerCAmelCase = torch.load(lowercase )
__lowerCAmelCase = model(**lowercase )
__lowerCAmelCase = outputs.logits
__lowerCAmelCase = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
__lowerCAmelCase = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(f'Model name not supported. Should be one of {model_names}' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , lowercase , atol=1e-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__lowerCAmelCase = outputs.loss
assert torch.allclose(lowercase , lowercase , atol=1e-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase )
model.save_pretrained(lowercase )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase , organization="""nielsr""" )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_a : int = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 689 | 1 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
# load base model
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained(lowercase , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
__lowerCAmelCase = load_file(lowercase )
__lowerCAmelCase = []
# directly update weight in diffusers model
for key in state_dict:
        # it is suggested to print out the key; it will usually look something like
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # the alpha was already applied beforehand, so ".alpha" keys are simply skipped
if ".alpha" in key or key in visited:
continue
if "text" in key:
__lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
__lowerCAmelCase = pipeline.text_encoder
else:
__lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
__lowerCAmelCase = pipeline.unet
# find the target layer
__lowerCAmelCase = layer_infos.pop(0 )
while len(lowercase ) > -1:
try:
__lowerCAmelCase = curr_layer.__getattr__(lowercase )
if len(lowercase ) > 0:
__lowerCAmelCase = layer_infos.pop(0 )
elif len(lowercase ) == 0:
break
except Exception:
if len(lowercase ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
__lowerCAmelCase = layer_infos.pop(0 )
__lowerCAmelCase = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(lowercase )
else:
pair_keys.append(lowercase )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
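        # merge the low-rank update into the frozen weight: W <- W + alpha * (up @ down);
        # 4-D conv weights are squeezed to 2-D for the matmul and the product is unsqueezed back to 4-D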
if len(state_dict[pair_keys[0]].shape ) == 4:
__lowerCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
__lowerCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase ).unsqueeze(2 ).unsqueeze(3 )
else:
__lowerCAmelCase = state_dict[pair_keys[0]].to(torch.floataa )
__lowerCAmelCase = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase )
# update visited list
for item in pair_keys:
visited.append(lowercase )
return pipeline
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
_a : Optional[int] = parser.parse_args()
_a : Dict = args.base_model_path
_a : Optional[Any] = args.checkpoint_path
_a : Union[str, Any] = args.dump_path
_a : Optional[int] = args.lora_prefix_unet
_a : int = args.lora_prefix_text_encoder
_a : str = args.alpha
_a : Any = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
_a : Tuple = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 689 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_a : Tuple = """\
"""
_a : Tuple = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_a : Optional[Any] = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ),reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 16,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
__lowerCAmelCase = """cuda"""
else:
__lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__SCREAMING_SNAKE_CASE ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__lowerCAmelCase = model.config.max_length - 1
else:
__lowerCAmelCase = model.config.max_length
__lowerCAmelCase = tokenizer(
__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,max_length=__SCREAMING_SNAKE_CASE,return_tensors="""pt""",return_attention_mask=__SCREAMING_SNAKE_CASE,).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = encodings["""input_ids"""]
__lowerCAmelCase = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ),1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ),2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__lowerCAmelCase = []
__lowerCAmelCase = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0,len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase = min(start_index + batch_size,len(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = encoded_texts[start_index:end_index]
__lowerCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
__lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch],dim=1 )
__lowerCAmelCase = torch.cat(
[torch.ones(bos_tokens_tensor.size(),dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask],dim=1 )
__lowerCAmelCase = encoded_batch
with torch.no_grad():
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE ).logits
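            # shift for next-token prediction: logits at position t are scored against labels at t+1,
            # and the attention mask is shifted the same way so padding is excluded from the loss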
__lowerCAmelCase = out_logits[..., :-1, :].contiguous()
__lowerCAmelCase = labels[..., 1:].contiguous()
__lowerCAmelCase = attn_mask[..., 1:].contiguous()
__lowerCAmelCase = torch.expa(
(loss_fct(shift_logits.transpose(1,2 ),__SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
| 689 | 1 |
'''simple docstring'''
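# Surface area of a regular dodecahedron with edge length a:
#   A = 3 * sqrt(25 + 10 * sqrt(5)) * a^2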
def _lowerCAmelCase ( lowercase ) -> float:
    if edge <= 0 or not isinstance(edge , (int, float) ):
raise ValueError("""Length must be a positive.""" )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
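# Volume of a regular dodecahedron with edge length a:
#   V = (15 + 7 * sqrt(5)) / 4 * a^3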
def _lowerCAmelCase ( lowercase ) -> float:
    if edge <= 0 or not isinstance(edge , (int, float) ):
raise ValueError("""Length must be a positive.""" )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 689 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Union[str, Any] =["""image_processor"""]
a : Dict ="""SamImageProcessor"""
def __init__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.image_processor
__lowerCAmelCase = -10
__lowerCAmelCase = self.image_processor.size["""longest_edge"""]
def __call__( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = self.image_processor(
__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
        # pop arguments that are not used in the forward pass but are used nevertheless
__lowerCAmelCase = encoding_image_processor["""original_sizes"""]
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks if Torch or TF tensor
__lowerCAmelCase = original_sizes.numpy()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._check_and_preprocess_points(
input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = self._normalize_and_convert(
__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,)
return encoding_image_processor
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="pt",):
'''simple docstring'''
if input_points is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0] ) for point in input_points
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
for point, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__lowerCAmelCase , __lowerCAmelCase = self._pad_points_and_labels(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_labels is not None:
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0],is_bounding_box=__SCREAMING_SNAKE_CASE )
for box in input_boxes
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,is_bounding_box=__SCREAMING_SNAKE_CASE )
for box, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
]
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
__lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
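        # Pad every example's point array (and its matching label array) to the largest number of
        # points in the batch, filling the padded entries with self.point_pad_value (-10).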
__lowerCAmelCase = max([point.shape[0] for point in input_points] )
__lowerCAmelCase = []
for i, point in enumerate(__SCREAMING_SNAKE_CASE ):
if point.shape[0] != expected_nb_points:
__lowerCAmelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 )
__lowerCAmelCase = np.append(input_labels[i],[self.point_pad_value] )
processed_input_points.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = processed_input_points
return input_points, input_labels
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
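        # Rescale (x, y) coordinates from the original image size to the size the image processor
        # resizes to (longest edge = self.target_size); bounding boxes are treated as two corner points.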
__lowerCAmelCase , __lowerCAmelCase = original_size
__lowerCAmelCase , __lowerCAmelCase = self.image_processor._get_preprocess_shape(__SCREAMING_SNAKE_CASE,longest_edge=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = deepcopy(__SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE )
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1,2,2 )
__lowerCAmelCase = coords[..., 0] * (new_w / old_w)
__lowerCAmelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1,4 )
return coords
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,):
'''simple docstring'''
if input_points is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks for TF or Torch tensor
__lowerCAmelCase = input_points.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0],__SCREAMING_SNAKE_CASE ):
raise ValueError("""Input points must be a list of list of floating points.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for input_point in input_points]
else:
__lowerCAmelCase = None
if input_labels is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
__lowerCAmelCase = input_labels.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0],__SCREAMING_SNAKE_CASE ):
raise ValueError("""Input labels must be a list of list integers.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for label in input_labels]
else:
__lowerCAmelCase = None
if input_boxes is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
__lowerCAmelCase = input_boxes.numpy().tolist()
if (
not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0],__SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0][0],__SCREAMING_SNAKE_CASE )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) for box in input_boxes]
else:
__lowerCAmelCase = None
return input_points, input_labels, input_boxes
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(__SCREAMING_SNAKE_CASE ) )
def lowerCamelCase__ ( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.image_processor.post_process_masks(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
| 689 | 1 |
'''simple docstring'''
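# Project Euler problem 57: in the first 1000 expansions of the continued fraction for sqrt(2),
# count the fractions whose numerator has more digits than the denominator.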
def _lowerCAmelCase ( lowercase = 1000 ) -> int:
__lowerCAmelCase , __lowerCAmelCase = 1, 1
__lowerCAmelCase = []
for i in range(1 , n + 1 ):
__lowerCAmelCase = prev_numerator + 2 * prev_denominator
__lowerCAmelCase = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
__lowerCAmelCase = numerator
__lowerCAmelCase = denominator
return len(lowercase )
if __name__ == "__main__":
print(f'{solution() = }')
| 689 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
_a : int = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
a : Optional[str] =field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
a : Optional[str] =field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
a : int =field(
default=10_24 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
a : bool =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the training data."""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the validation data."""} )
a : Optional[str] =field(default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the test data."""} )
def lowerCamelCase__ ( self ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
__lowerCAmelCase = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__lowerCAmelCase = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class _UpperCAmelCase :
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a : str =field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def _lowerCAmelCase ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(lowercase )
datasets.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__lowerCAmelCase = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__lowerCAmelCase = data_args.train_file.split(""".""" )[-1]
__lowerCAmelCase = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__lowerCAmelCase = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
__lowerCAmelCase = load_dataset("""csv""" , data_files=lowercase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__lowerCAmelCase = load_dataset("""json""" , data_files=lowercase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__lowerCAmelCase = raw_datasets["""train"""].features["""label"""].names
__lowerCAmelCase = len(lowercase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__lowerCAmelCase = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase , )
__lowerCAmelCase = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__lowerCAmelCase = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__lowerCAmelCase = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__lowerCAmelCase = {"""Refused""": 0, """Entailed""": 1}
__lowerCAmelCase = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
__lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowercase ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowercase ):
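            # The serialized table uses "\n" between rows and "#" between cells;
            # the first row becomes the DataFrame header below.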
__lowerCAmelCase = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
__lowerCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__lowerCAmelCase = examples["""statement"""]
__lowerCAmelCase = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
__lowerCAmelCase = tokenizer(lowercase , lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase )
__lowerCAmelCase = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
__lowerCAmelCase = raw_datasets.map(
lowercase , batched=lowercase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__lowerCAmelCase = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__lowerCAmelCase = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__lowerCAmelCase = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__lowerCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
__lowerCAmelCase = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
__lowerCAmelCase = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowercase ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowercase ):
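        # `EvalPrediction.predictions` can be a tuple (logits first) depending on the model's outputs.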
__lowerCAmelCase = p.predictions[0] if isinstance(p.predictions , lowercase ) else p.predictions
__lowerCAmelCase = np.argmax(lowercase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__lowerCAmelCase = default_data_collator
elif training_args.fpaa:
__lowerCAmelCase = DataCollatorWithPadding(lowercase , pad_to_multiple_of=8 )
else:
__lowerCAmelCase = None
# Initialize our Trainer
__lowerCAmelCase = Trainer(
model=lowercase , args=lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase , tokenizer=lowercase , data_collator=lowercase , )
# Training
if training_args.do_train:
__lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
__lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCAmelCase = last_checkpoint
__lowerCAmelCase = trainer.train(resume_from_checkpoint=lowercase )
__lowerCAmelCase = train_result.metrics
__lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase )
)
__lowerCAmelCase = min(lowercase , len(lowercase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , lowercase )
trainer.save_metrics("""train""" , lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowerCAmelCase = trainer.evaluate(eval_dataset=lowercase )
__lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase )
__lowerCAmelCase = min(lowercase , len(lowercase ) )
trainer.log_metrics("""eval""" , lowercase )
trainer.save_metrics("""eval""" , lowercase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
__lowerCAmelCase = predict_dataset.remove_columns("""label""" )
__lowerCAmelCase = trainer.predict(lowercase , metric_key_prefix="""predict""" ).predictions
__lowerCAmelCase = np.argmax(lowercase , axis=1 )
__lowerCAmelCase = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(lowercase , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(lowercase ):
__lowerCAmelCase = label_list[item]
writer.write(f'{index}\t{item}\n' )
__lowerCAmelCase = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase )
else:
trainer.create_model_card(**lowercase )
def _lowerCAmelCase ( lowercase ) -> str:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 689 | 1 |
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_a : List[Any] = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
_a : Dict = subprocess.check_output(f'git diff --name-only {fork_point_sha}'.split()).decode("""utf-8""").split()
_a : Dict = """|""".join(sys.argv[1:])
_a : Any = re.compile(rf'^({joined_dirs}).*?\.py$')
_a : str = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 689 |
'''simple docstring'''
import os
import sys
import unittest
_a : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_a : Union[str, Any] = os.path.join(git_repo_path, """src""", """diffusers""")
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = find_backend(""" if not is_torch_available():""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__lowerCAmelCase = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__lowerCAmelCase = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers_and_onnx""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn("""torch""",__SCREAMING_SNAKE_CASE )
self.assertIn("""torch_and_transformers""",__SCREAMING_SNAKE_CASE )
self.assertIn("""flax_and_transformers""",__SCREAMING_SNAKE_CASE )
self.assertIn("""torch_and_transformers_and_onnx""",__SCREAMING_SNAKE_CASE )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""",objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""",objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""",objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""",objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""",objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""",objects["""torch_and_transformers_and_onnx"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = create_dummy_object("""CONSTANT""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""\nCONSTANT = None\n""" )
__lowerCAmelCase = create_dummy_object("""function""","""'torch'""" )
self.assertEqual(
__SCREAMING_SNAKE_CASE,"""\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
__lowerCAmelCase = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
__lowerCAmelCase = create_dummy_object("""FakeClass""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
__lowerCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""],__SCREAMING_SNAKE_CASE )
| 689 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase , lowercase ) -> list[int]:
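    # Greedy change-making: walk the denominations from largest to smallest (the input list is
    # assumed ascending) and take each as many times as it fits. This is optimal for canonical
    # coin systems such as the Indian denominations used in the driver below.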
__lowerCAmelCase = int(lowercase )
# Initialize Result
__lowerCAmelCase = []
# Traverse through all denomination
for denomination in reversed(lowercase ):
# Find denominations
while int(lowercase ) >= int(lowercase ):
total_value -= int(lowercase )
answer.append(lowercase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
_a : str = []
_a : Optional[int] = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
_a : Union[str, Any] = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(f'Denomination {i}: ').strip()))
_a : Optional[Any] = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
_a : Optional[int] = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
_a : Any = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(f'Following is minimal change for {value}: ')
_a : List[str] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 689 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase ) -> tuple[int, int]:
try:
__lowerCAmelCase = float(lowercase )
except ValueError:
raise ValueError("""Please enter a valid number""" )
__lowerCAmelCase = decimal - int(lowercase )
if fractional_part == 0:
return int(lowercase ), 1
else:
__lowerCAmelCase = len(str(lowercase ).split(""".""" )[1] )
__lowerCAmelCase = int(decimal * (10**number_of_frac_digits) )
__lowerCAmelCase = 10**number_of_frac_digits
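        # Reduce the fraction with Euclid's algorithm: the final divisor is gcd(numerator, denominator).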
__lowerCAmelCase , __lowerCAmelCase = denominator, numerator
while True:
__lowerCAmelCase = dividend % divisor
if remainder == 0:
break
__lowerCAmelCase , __lowerCAmelCase = divisor, remainder
__lowerCAmelCase , __lowerCAmelCase = numerator / divisor, denominator / divisor
return int(lowercase ), int(lowercase )
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
| 689 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase , lowercase ) -> str:
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
__lowerCAmelCase = str(bin(lowercase ) )[2:] # remove the leading "0b"
__lowerCAmelCase = str(bin(lowercase ) )[2:] # remove the leading "0b"
__lowerCAmelCase = max(len(lowercase ) , len(lowercase ) )
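    # Zero-pad both operands to the same width, then XOR the bit characters position by position.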
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(lowercase ) , b_binary.zfill(lowercase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 689 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_a : Dict = _symbol_database.Default()
_a : Union[str, Any] = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_a : str = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_a : str = None
_a : Union[str, Any] = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_a : Optional[int] = 4_5
_a : List[Any] = 1_5_8_1
_a : str = 1_5_1_7
_a : Optional[Any] = 1_5_7_0
_a : List[str] = 1_5_8_4
_a : List[Any] = 1_7_9_3
_a : Union[str, Any] = 1_7_9_5
_a : Tuple = 1_9_1_6
_a : List[Any] = 1_8_6_4
_a : Any = 1_9_0_5
_a : Optional[Any] = 1_9_1_9
_a : Optional[int] = 2_4_2_9
_a : Tuple = 2_2_0_8
_a : Optional[Any] = 2_4_1_8
_a : List[Any] = 2_3_2_3
_a : str = 2_4_0_7
# @@protoc_insertion_point(module_scope)
| 689 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_a : Union[str, Any] = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
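# Backend-specific classes are only added to the lazy import structure when PyTorch / Flax is
# installed, so importing this module does not require those optional dependencies.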
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_a : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : torch.FloatTensor
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE=True,):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = torch.nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[0],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
# down
__lowerCAmelCase = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_down_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,add_downsample=not is_final_block,resnet_eps=1e-6,downsample_padding=0,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""",attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# out
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = 2 * out_channels if double_z else out_channels
__lowerCAmelCase = nn.Convad(block_out_channels[-1],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = x
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
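            # Gradient checkpointing: wrap each block so its activations are recomputed during
            # the backward pass, trading extra compute for lower memory use.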
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""","""1.11.0""" ):
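                # The `use_reentrant` argument of torch.utils.checkpoint was added in PyTorch 1.11,
                # hence the version gate around this branch.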
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
__lowerCAmelCase = down_block(__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE="group",):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[-1],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
__lowerCAmelCase = in_channels if norm_type == """spatial""" else None
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type,attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# up
__lowerCAmelCase = list(reversed(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = reversed_block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_up_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block + 1,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,prev_output_channel=__SCREAMING_SNAKE_CASE,add_upsample=not is_final_block,resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,resnet_time_scale_shift=__SCREAMING_SNAKE_CASE,)
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = output_channel
# out
if norm_type == "spatial":
__lowerCAmelCase = SpatialNorm(block_out_channels[0],__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = nn.Convad(block_out_channels[0],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
__lowerCAmelCase = z
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
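        # Record the up blocks' parameter dtype so the mid-block output can be cast to it before
        # upsampling (keeps dtypes consistent under mixed precision).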
__lowerCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""","""1.11.0""" ):
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = up_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="random",__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = n_e
__lowerCAmelCase = vq_embed_dim
__lowerCAmelCase = beta
__lowerCAmelCase = legacy
__lowerCAmelCase = nn.Embedding(self.n_e,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e,1.0 / self.n_e )
__lowerCAmelCase = remap
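        # Optional codebook remapping: when a .npy file of "used" indices is provided, quantized
        # indices are translated into that restricted index space.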
if self.remap is not None:
self.register_buffer("""used""",torch.tensor(np.load(self.remap ) ) )
__lowerCAmelCase = self.used.shape[0]
__lowerCAmelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowerCAmelCase = self.re_embed
__lowerCAmelCase = self.re_embed + 1
print(
f'Remapping {self.n_e} indices to {self.re_embed} indices. '
f'Using {self.unknown_index} for unknown indices.' )
else:
__lowerCAmelCase = n_e
__lowerCAmelCase = sane_index_shape
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = (inds[:, :, None] == used[None, None, ...]).long()
__lowerCAmelCase = match.argmax(-1 )
__lowerCAmelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
__lowerCAmelCase = torch.randint(0,self.re_embed,size=new[unknown].shape ).to(device=new.device )
else:
__lowerCAmelCase = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
__lowerCAmelCase = 0 # simply set to zero
__lowerCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :],1,__SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = z.permute(0,2,3,1 ).contiguous()
__lowerCAmelCase = z.view(-1,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__lowerCAmelCase = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE,self.embedding.weight ),dim=1 )
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
__lowerCAmelCase = None
__lowerCAmelCase = None
# compute loss for embedding
if not self.legacy:
__lowerCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__lowerCAmelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__lowerCAmelCase = z + (z_q - z).detach()
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
if self.remap is not None:
__lowerCAmelCase = min_encoding_indices.reshape(z.shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.remap_to_used(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = min_encoding_indices.reshape(-1,1 ) # flatten
if self.sane_index_shape:
__lowerCAmelCase = min_encoding_indices.reshape(z_q.shape[0],z_q.shape[2],z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if self.remap is not None:
__lowerCAmelCase = indices.reshape(shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
__lowerCAmelCase = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
return z_q
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = parameters
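        # `parameters` packs mean and log-variance along the channel dimension; split them and
        # clamp the log-variance for numerical stability.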
__lowerCAmelCase , __lowerCAmelCase = torch.chunk(__SCREAMING_SNAKE_CASE,2,dim=1 )
__lowerCAmelCase = torch.clamp(self.logvar,-30.0,20.0 )
__lowerCAmelCase = deterministic
__lowerCAmelCase = torch.exp(0.5 * self.logvar )
__lowerCAmelCase = torch.exp(self.logvar )
if self.deterministic:
__lowerCAmelCase = __lowerCAmelCase = torch.zeros_like(
self.mean,device=self.parameters.device,dtype=self.parameters.dtype )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
__lowerCAmelCase = randn_tensor(
self.mean.shape,generator=__SCREAMING_SNAKE_CASE,device=self.parameters.device,dtype=self.parameters.dtype )
__lowerCAmelCase = self.mean + self.std * sample
return x
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
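        # Closed-form KL divergence between diagonal Gaussians; when `other` is None the
        # reference distribution is the standard normal N(0, I).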
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean,2 ) + self.var - 1.0 - self.logvar,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar,dim=[1, 2, 3],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=[1, 2, 3] ):
'''simple docstring'''
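        # Negative log-likelihood of `sample` under this diagonal Gaussian, summed over `dims`.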
if self.deterministic:
return torch.Tensor([0.0] )
__lowerCAmelCase = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean,2 ) / self.var,dim=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.mean
| 689 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
_a : Optional[Any] = logging.getLogger(__name__)
_a : str = tf.data.AUTOTUNE
def _lowerCAmelCase ( ) -> Tuple:
__lowerCAmelCase = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=lowercase , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=lowercase , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=lowercase , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=lowercase , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=lowercase , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=lowercase , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=lowercase , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=lowercase , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=lowercase , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=lowercase , default=1e-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=lowercase , default=1e-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=lowercase , default=512 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=lowercase , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=lowercase , required=lowercase , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=lowercase , help="""Model ID to upload to on the Hugging Face Hub.""" )
__lowerCAmelCase = parser.parse_args()
return args
def _lowerCAmelCase ( lowercase ) -> int:
try:
if args.tpu_name:
__lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
__lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(lowercase )
tf.tpu.experimental.initialize_tpu_system(lowercase )
return tpu
def _lowerCAmelCase ( lowercase ) -> List[str]:
__lowerCAmelCase = 0
for file in file_list:
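        # Shard filenames are expected to end in "-<shard>-<num_samples>.tfrecord";
        # the trailing integer is the per-shard sample count.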
__lowerCAmelCase = file.split("""/""" )[-1]
__lowerCAmelCase = re.search(R"""-\d+-(\d+)\.tfrecord""" , lowercase ).group(1 )
__lowerCAmelCase = int(lowercase )
num_samples += sample_count
return num_samples
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None ) -> Any:
__lowerCAmelCase = count_samples(lowercase )
__lowerCAmelCase = tf.data.Dataset.from_tensor_slices(lowercase )
if shuffle:
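        # Shuffle at the file level first so shards are read in a different order each epoch.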
__lowerCAmelCase = dataset.shuffle(len(lowercase ) )
__lowerCAmelCase = tf.data.TFRecordDataset(lowercase , num_parallel_reads=lowercase )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
__lowerCAmelCase = dataset.apply(tf.data.experimental.assert_cardinality(lowercase ) )
__lowerCAmelCase = dataset.map(lowercase , num_parallel_calls=lowercase )
if shuffle:
assert shuffle_buffer_size is not None
__lowerCAmelCase = dataset.shuffle(args.shuffle_buffer_size )
__lowerCAmelCase = dataset.batch(lowercase , drop_remainder=lowercase )
__lowerCAmelCase = dataset.map(lowercase , num_parallel_calls=lowercase )
__lowerCAmelCase = dataset.prefetch(lowercase )
return dataset
def _lowerCAmelCase ( lowercase ) -> int:
if not args.no_tpu:
__lowerCAmelCase = initialize_tpu(lowercase )
__lowerCAmelCase = tf.distribute.TPUStrategy(lowercase )
else:
__lowerCAmelCase = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
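        # bfloat16 mixed precision is the recommended reduced-precision setting on TPUs.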
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
__lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer )
__lowerCAmelCase = AutoConfig.from_pretrained(args.pretrained_model_config )
__lowerCAmelCase = tokenizer.vocab_size
__lowerCAmelCase = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' )
__lowerCAmelCase = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' )
__lowerCAmelCase = count_samples(lowercase )
__lowerCAmelCase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
__lowerCAmelCase = steps_per_epoch * args.num_epochs
with strategy.scope():
__lowerCAmelCase = TFAutoModelForMaskedLM.from_config(lowercase )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
__lowerCAmelCase , __lowerCAmelCase = create_optimizer(
num_train_steps=lowercase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowercase , metrics=["""accuracy"""] )
def decode_fn(lowercase ):
__lowerCAmelCase = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowercase , lowercase )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
__lowerCAmelCase = DataCollatorForLanguageModeling(
tokenizer=lowercase , mlm_probability=args.mlm_probability , mlm=lowercase , return_tensors="""tf""" )
def mask_with_collator(lowercase ):
# TF really needs an isin() function
__lowerCAmelCase = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
__lowerCAmelCase , __lowerCAmelCase = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(lowercase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowercase , )
return batch
__lowerCAmelCase = args.per_replica_batch_size * strategy.num_replicas_in_sync
__lowerCAmelCase = prepare_dataset(
lowercase , decode_fn=lowercase , mask_fn=lowercase , batch_size=lowercase , shuffle=lowercase , shuffle_buffer_size=args.shuffle_buffer_size , )
__lowerCAmelCase = prepare_dataset(
lowercase , decode_fn=lowercase , mask_fn=lowercase , batch_size=lowercase , shuffle=lowercase , )
__lowerCAmelCase = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowercase ) )
model.fit(
lowercase , validation_data=lowercase , epochs=args.num_epochs , callbacks=lowercase , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
_a : Any = parse_args()
main(args)
| 689 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
_a : Optional[int] = logging.get_logger(__name__)
_a : int = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_a : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCAmelCase :
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """Model type selected in the list: """ + """, """.join(lowerCAmelCase_ )} )
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
a : int =field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a : int =field(
default=1_28 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
a : int =field(
default=64 , metadata={
"""help""": (
"""The maximum number of tokens for the question. Questions longer than this will """
"""be truncated to this length."""
)
} , )
a : int =field(
default=30 , metadata={
"""help""": (
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
)
} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
a : float =field(
default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a : int =field(
default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a : int =field(
default=0 , metadata={
"""help""": (
"""language id of input for language-specific xlm models (see"""
""" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
)
} , )
a : int =field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Optional[Any] ="""train"""
a : Optional[int] ="""dev"""
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : SquadDataTrainingArguments
a : List[SquadFeatures]
a : Split
a : bool
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = Split.train,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = "pt",):
'''simple docstring'''
__lowerCAmelCase = args
__lowerCAmelCase = is_language_sensitive
__lowerCAmelCase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
try:
__lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
__lowerCAmelCase = mode
# Load data features from cache or dataset file
__lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1"""
__lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCAmelCase = cached_features_file + """.lock"""
with FileLock(__SCREAMING_SNAKE_CASE ):
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache:
__lowerCAmelCase = time.time()
__lowerCAmelCase = torch.load(__SCREAMING_SNAKE_CASE )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__lowerCAmelCase = self.old_features["""features"""]
__lowerCAmelCase = self.old_features.get("""dataset""",__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.old_features.get("""examples""",__SCREAMING_SNAKE_CASE )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
""" future run""" )
else:
if mode == Split.dev:
__lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
else:
__lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
__lowerCAmelCase , __lowerCAmelCase = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__SCREAMING_SNAKE_CASE,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples},__SCREAMING_SNAKE_CASE,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.features[i]
__lowerCAmelCase = torch.tensor(feature.input_ids,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.attention_mask,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.token_type_ids,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.cls_index,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.p_mask,dtype=torch.float )
__lowerCAmelCase = torch.tensor(feature.is_impossible,dtype=torch.float )
__lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
__lowerCAmelCase = torch.tensor(feature.start_position,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
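# Hedged illustration of the per-feature tensor assembly done in __getitem__ above; the field
# values below are invented for demonstration and are not taken from a real cached SQuAD feature.
if __name__ == "__main__":
    import torch
    _demo_feature = {"input_ids": [101, 2054, 2003, 102], "attention_mask": [1, 1, 1, 1], "token_type_ids": [0, 0, 0, 0]}
    _demo_inputs = {key: torch.tensor(value, dtype=torch.long) for key, value in _demo_feature.items()}
    assert _demo_inputs["input_ids"].shape == (4,)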
| 689 | 1 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class _UpperCAmelCase ( TensorFormatter[Mapping, """torch.Tensor""", Mapping] ):
def __init__( self,__SCREAMING_SNAKE_CASE=None,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(features=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch_tensor_kwargs
import torch # noqa import torch at initialization
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
import torch
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) and column:
if all(
isinstance(__SCREAMING_SNAKE_CASE,torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(__SCREAMING_SNAKE_CASE )
return column
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
import torch
if isinstance(__SCREAMING_SNAKE_CASE,(str, bytes, type(__SCREAMING_SNAKE_CASE )) ):
return value
elif isinstance(__SCREAMING_SNAKE_CASE,(np.character, np.ndarray) ) and np.issubdtype(value.dtype,np.character ):
return value.tolist()
__lowerCAmelCase = {}
if isinstance(__SCREAMING_SNAKE_CASE,(np.number, np.ndarray) ) and np.issubdtype(value.dtype,np.integer ):
__lowerCAmelCase = {"""dtype""": torch.intaa}
elif isinstance(__SCREAMING_SNAKE_CASE,(np.number, np.ndarray) ) and np.issubdtype(value.dtype,np.floating ):
__lowerCAmelCase = {"""dtype""": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__SCREAMING_SNAKE_CASE,PIL.Image.Image ):
__lowerCAmelCase = np.asarray(__SCREAMING_SNAKE_CASE )
return torch.tensor(__SCREAMING_SNAKE_CASE,**{**default_dtype, **self.torch_tensor_kwargs} )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
import torch
# support for torch, tf, jax etc.
if hasattr(__SCREAMING_SNAKE_CASE,"""__array__""" ) and not isinstance(__SCREAMING_SNAKE_CASE,torch.Tensor ):
__lowerCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__SCREAMING_SNAKE_CASE,np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE ) for substruct in data_struct] )
elif isinstance(__SCREAMING_SNAKE_CASE,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE ) for substruct in data_struct] )
return self._tensorize(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return map_nested(self._recursive_tensorize,__SCREAMING_SNAKE_CASE,map_list=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.numpy_arrow_extractor().extract_row(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.python_features_decoder.decode_row(__SCREAMING_SNAKE_CASE )
return self.recursive_tensorize(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.numpy_arrow_extractor().extract_column(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.python_features_decoder.decode_column(__SCREAMING_SNAKE_CASE,pa_table.column_names[0] )
__lowerCAmelCase = self.recursive_tensorize(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self._consolidate(__SCREAMING_SNAKE_CASE )
return column
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.numpy_arrow_extractor().extract_batch(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.python_features_decoder.decode_batch(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.recursive_tensorize(__SCREAMING_SNAKE_CASE )
for column_name in batch:
__lowerCAmelCase = self._consolidate(batch[column_name] )
return batch
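# Hedged sketch of the dtype defaulting performed by the formatter above: integer numpy data is
# given a 64-bit integer dtype and floating data a 32-bit float dtype unless overridden via
# torch_tensor_kwargs. This is a standalone illustration written with standard torch dtype names;
# the _demo helper is not part of the formatter API.
def _demo_default_torch_dtype(value):
    import torch
    arr = np.asarray(value)
    if np.issubdtype(arr.dtype, np.integer):
        return torch.tensor(arr, dtype=torch.int64)  # integer columns default to int64
    if np.issubdtype(arr.dtype, np.floating):
        return torch.tensor(arr, dtype=torch.float32)  # floating columns default to float32
    return torch.tensor(arr)
if __name__ == "__main__":
    import torch
    assert _demo_default_torch_dtype([1, 2]).dtype == torch.int64
    assert _demo_default_torch_dtype([1.0, 2.0]).dtype == torch.float32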
| 689 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
# vision encoder
if "img_encoder.pos_embed" in name:
__lowerCAmelCase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
__lowerCAmelCase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
__lowerCAmelCase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
__lowerCAmelCase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
__lowerCAmelCase = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
__lowerCAmelCase = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
__lowerCAmelCase = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
__lowerCAmelCase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
__lowerCAmelCase = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
__lowerCAmelCase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
__lowerCAmelCase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
__lowerCAmelCase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
__lowerCAmelCase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
__lowerCAmelCase = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
__lowerCAmelCase = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
__lowerCAmelCase = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
__lowerCAmelCase = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
__lowerCAmelCase = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
__lowerCAmelCase = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
__lowerCAmelCase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
__lowerCAmelCase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
__lowerCAmelCase = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
__lowerCAmelCase = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> Dict:
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(lowercase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase , __lowerCAmelCase = int(key_split[2] ), int(key_split[4] )
__lowerCAmelCase = config.vision_config.hidden_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[dim : dim * 2]
__lowerCAmelCase = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase = int(key_split[3] )
__lowerCAmelCase = config.text_config.hidden_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[
dim : dim * 2, :
]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[dim : dim * 2]
__lowerCAmelCase = val[-dim:]
else:
__lowerCAmelCase = rename_key(lowercase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
__lowerCAmelCase = val.squeeze_()
else:
__lowerCAmelCase = val
return orig_state_dict
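# Hedged sketch of the fused-projection split performed above: a stacked (3 * dim, dim) qkv or
# in_proj weight is cut into equal query / key / value blocks along the first axis. The helper
# and sizes below are illustrative only and are not part of the conversion script's interface.
def _demo_split_qkv(fused_weight, dim):
    query = fused_weight[:dim, :]
    key = fused_weight[dim : dim * 2, :]
    value = fused_weight[-dim:, :]
    return query, key, value
if __name__ == "__main__":
    _fused = torch.arange(3 * 4 * 4, dtype=torch.float32).reshape(12, 4)
    _q, _k, _v = _demo_split_qkv(_fused, 4)
    assert _q.shape == _k.shape == _v.shape == (4, 4)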
def _lowerCAmelCase ( ) -> str:
__lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCAmelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase , lowercase="groupvit-gcc-yfcc" , lowercase=False ) -> List[Any]:
__lowerCAmelCase = GroupViTConfig()
__lowerCAmelCase = GroupViTModel(lowercase ).eval()
__lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )["""model"""]
__lowerCAmelCase = convert_state_dict(lowercase , lowercase )
__lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowercase , strict=lowercase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase ) == 0)
# verify result
__lowerCAmelCase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowercase , padding=lowercase , return_tensors="""pt""" )
with torch.no_grad():
__lowerCAmelCase = model(**lowercase )
if model_name == "groupvit-gcc-yfcc":
__lowerCAmelCase = torch.tensor([[13.35_23, 6.36_29]] )
elif model_name == "groupvit-gcc-redcaps":
__lowerCAmelCase = torch.tensor([[16.18_73, 8.62_30]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , lowercase , atol=1e-3 )
processor.save_pretrained(lowercase )
model.save_pretrained(lowercase )
print("""Successfully saved processor and model to""" , lowercase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowercase , organization="""nielsr""" )
model.push_to_hub(lowercase , organization="""nielsr""" )
if __name__ == "__main__":
_a : int = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_a : List[str] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 689 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase ) -> int:
__lowerCAmelCase = len(lowercase )
__lowerCAmelCase = len(matrix[0] )
__lowerCAmelCase = min(lowercase , lowercase )
for row in range(lowercase ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , lowercase ):
__lowerCAmelCase = matrix[col][row] / matrix[row][row]
for i in range(lowercase , lowercase ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
__lowerCAmelCase = True
for i in range(row + 1 , lowercase ):
if matrix[i][row] != 0:
__lowerCAmelCase , __lowerCAmelCase = matrix[i], matrix[row]
__lowerCAmelCase = False
break
if reduce:
rank -= 1
for i in range(lowercase ):
__lowerCAmelCase = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
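# Hedged, self-contained cross-check of the elimination idea above, independent of the function
# defined in this file: the rank equals the number of non-zero pivot rows left after forward
# elimination with row swapping. Names prefixed with _demo are illustrative only.
def _demo_rank(mat):
    mat = [row[:] for row in mat]  # work on a copy
    n_rows, n_cols = len(mat), len(mat[0])
    rank = 0
    for col in range(n_cols):
        pivot = next((r for r in range(rank, n_rows) if abs(mat[r][col]) > 1e-12), None)
        if pivot is None:
            continue  # no usable pivot in this column
        mat[rank], mat[pivot] = mat[pivot], mat[rank]
        for r in range(rank + 1, n_rows):
            factor = mat[r][col] / mat[rank][col]
            for c in range(col, n_cols):
                mat[r][c] -= factor * mat[rank][c]
        rank += 1
    return rank
if __name__ == "__main__":
    assert _demo_rank([[1.0, 2.0], [2.0, 4.0]]) == 1  # second row is a multiple of the first
    assert _demo_rank([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) == 3  # identity has full rank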
| 689 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_a : Tuple = logging.get_logger(__name__)
_a : Optional[int] = ["""model.decoder.embed_positions.weights"""]
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
if "emb" in name:
__lowerCAmelCase = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
__lowerCAmelCase = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
__lowerCAmelCase = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
__lowerCAmelCase = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
__lowerCAmelCase = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
__lowerCAmelCase = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
__lowerCAmelCase = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
__lowerCAmelCase = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
__lowerCAmelCase = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
__lowerCAmelCase = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple[Dict, Dict]:
__lowerCAmelCase = list(state_dict.keys() )
__lowerCAmelCase = {}
for key in keys:
__lowerCAmelCase = state_dict.pop(lowercase )
__lowerCAmelCase = rename_keys(lowercase )
if "in_proj_weight" in key:
# split fused qkv proj
__lowerCAmelCase = val[:hidden_size, :]
__lowerCAmelCase = val[hidden_size : 2 * hidden_size, :]
__lowerCAmelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__lowerCAmelCase = val
else:
__lowerCAmelCase = val
return state_dict, enc_dec_proj_state_dict
def _lowerCAmelCase ( lowercase ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
__lowerCAmelCase = 1024
__lowerCAmelCase = 24
__lowerCAmelCase = 16
elif checkpoint == "medium":
__lowerCAmelCase = 1536
__lowerCAmelCase = 48
__lowerCAmelCase = 24
elif checkpoint == "large":
__lowerCAmelCase = 2048
__lowerCAmelCase = 48
__lowerCAmelCase = 32
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
__lowerCAmelCase = MusicgenDecoderConfig(
hidden_size=lowercase , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase , num_attention_heads=lowercase , )
return config
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase=None , lowercase=None , lowercase="cpu" ) -> Optional[Any]:
__lowerCAmelCase = MusicGen.get_pretrained(lowercase , device=lowercase )
__lowerCAmelCase = decoder_config_from_checkpoint(lowercase )
__lowerCAmelCase = fairseq_model.lm.state_dict()
__lowerCAmelCase , __lowerCAmelCase = rename_state_dict(
lowercase , hidden_size=decoder_config.hidden_size )
__lowerCAmelCase = TaEncoderModel.from_pretrained("""t5-base""" )
__lowerCAmelCase = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
__lowerCAmelCase = MusicgenForCausalLM(lowercase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__lowerCAmelCase , __lowerCAmelCase = decoder.load_state_dict(lowercase , strict=lowercase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase )
if len(lowercase ) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
if len(lowercase ) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
__lowerCAmelCase = MusicgenForConditionalGeneration(text_encoder=lowercase , audio_encoder=lowercase , decoder=lowercase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase )
# check we can do a forward pass
__lowerCAmelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__lowerCAmelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__lowerCAmelCase = model(input_ids=lowercase , decoder_input_ids=lowercase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
__lowerCAmelCase = AutoTokenizer.from_pretrained("""t5-base""" )
__lowerCAmelCase = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
__lowerCAmelCase = MusicgenProcessor(feature_extractor=lowercase , tokenizer=lowercase )
# set the appropriate bos/pad token ids
__lowerCAmelCase = 2048
__lowerCAmelCase = 2048
# set other default generation config params
__lowerCAmelCase = int(30 * audio_encoder.config.frame_rate )
__lowerCAmelCase = True
__lowerCAmelCase = 3.0
if pytorch_dump_folder is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(lowercase )
processor.save_pretrained(lowercase )
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(lowercase )
processor.push_to_hub(lowercase )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
_a : List[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 689 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> int:
def count_of_possible_combinations(lowercase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(lowercase )
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> int:
def count_of_possible_combinations_with_dp_array(
lowercase , lowercase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
__lowerCAmelCase = sum(
count_of_possible_combinations_with_dp_array(target - item , lowercase )
for item in array )
__lowerCAmelCase = answer
return answer
__lowerCAmelCase = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(lowercase , lowercase )
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> int:
__lowerCAmelCase = [0] * (target + 1)
__lowerCAmelCase = 1
for i in range(1 , target + 1 ):
for j in range(lowercase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_a : int = 3
_a : Dict = 5
_a : int = [1, 2, 5]
print(combination_sum_iv(n, array, target))
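# Hedged worked example of the bottom-up recurrence used above: with items [1, 2, 5] and target 5
# the table fills as dp = [1, 1, 2, 3, 5, 9], so there are 9 ordered ways to reach the target.
# The helper below is a standalone restatement for illustration, not one of the functions above.
def _demo_count_orderings(items, target):
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to make 0: pick nothing
    for total in range(1, target + 1):
        dp[total] = sum(dp[total - item] for item in items if total - item >= 0)
    return dp[target]
assert _demo_count_orderings([1, 2, 5], 5) == 9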
| 689 |
'''simple docstring'''
from collections import deque
def _lowerCAmelCase ( lowercase ) -> Dict:
__lowerCAmelCase = len(lowercase )
__lowerCAmelCase = deque()
__lowerCAmelCase = [False for _ in range(lowercase )]
__lowerCAmelCase = [-1 for _ in range(lowercase )]
__lowerCAmelCase = index_of[:]
def strong_connect(lowercase , lowercase , lowercase ):
__lowerCAmelCase = index # the number when this node is seen
__lowerCAmelCase = index # lowest rank node reachable from here
index += 1
stack.append(lowercase )
__lowerCAmelCase = True
for w in g[v]:
if index_of[w] == -1:
__lowerCAmelCase = strong_connect(lowercase , lowercase , lowercase )
__lowerCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__lowerCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
__lowerCAmelCase = []
__lowerCAmelCase = stack.pop()
__lowerCAmelCase = False
component.append(lowercase )
while w != v:
__lowerCAmelCase = stack.pop()
__lowerCAmelCase = False
component.append(lowercase )
components.append(lowercase )
return index
__lowerCAmelCase = []
for v in range(lowercase ):
if index_of[v] == -1:
strong_connect(lowercase , 0 , lowercase )
return components
def _lowerCAmelCase ( lowercase , lowercase ) -> str:
__lowerCAmelCase = [[] for _ in range(lowercase )]
for u, v in edges:
g[u].append(lowercase )
return g
if __name__ == "__main__":
# Test
_a : Any = 7
_a : Tuple = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_a : Optional[int] = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_a : Optional[Any] = [(u, v) for u, v in zip(source, target)]
_a : Optional[int] = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
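# Hedged cross-check of the strongly-connected-component idea above on a second tiny graph,
# computed by brute-force mutual reachability rather than by the function defined in this file:
# nodes 0-1-2 form a cycle and node 3 only hangs off it, so the components are {0, 1, 2} and {3}.
def _demo_reachable(adj, start):
    seen, stack = {start}, [start]
    while stack:
        node = stack.pop()
        for nxt in adj[node]:
            if nxt not in seen:
                seen.add(nxt)
                stack.append(nxt)
    return seen
_demo_adj = {0: [1], 1: [2], 2: [0, 3], 3: []}
_demo_reach = {node: _demo_reachable(_demo_adj, node) for node in _demo_adj}
_demo_components = {
    frozenset(v for v in _demo_adj if u in _demo_reach[v] and v in _demo_reach[u]) for u in _demo_adj
}
assert _demo_components == {frozenset({0, 1, 2}), frozenset({3})}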
| 689 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : Optional[int] = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[str] ="""gptj"""
a : Optional[int] ={
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self,__SCREAMING_SNAKE_CASE=5_04_00,__SCREAMING_SNAKE_CASE=20_48,__SCREAMING_SNAKE_CASE=40_96,__SCREAMING_SNAKE_CASE=28,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="gelu_new",__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=False,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = vocab_size
__lowerCAmelCase = n_positions
__lowerCAmelCase = n_embd
__lowerCAmelCase = n_layer
__lowerCAmelCase = n_head
__lowerCAmelCase = n_inner
__lowerCAmelCase = rotary_dim
__lowerCAmelCase = activation_function
__lowerCAmelCase = resid_pdrop
__lowerCAmelCase = embd_pdrop
__lowerCAmelCase = attn_pdrop
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_range
__lowerCAmelCase = use_cache
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
super().__init__(
bos_token_id=__SCREAMING_SNAKE_CASE,eos_token_id=__SCREAMING_SNAKE_CASE,tie_word_embeddings=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = "default",__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE,task=__SCREAMING_SNAKE_CASE,patching_specs=__SCREAMING_SNAKE_CASE,use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config,"""pad_token_id""",__SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
__lowerCAmelCase = 0
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE,direction="""inputs""" )
__lowerCAmelCase = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._config.n_layer
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._config.n_head
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,):
'''simple docstring'''
__lowerCAmelCase = super(__SCREAMING_SNAKE_CASE,self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE,batch_size=__SCREAMING_SNAKE_CASE,seq_length=__SCREAMING_SNAKE_CASE,is_pair=__SCREAMING_SNAKE_CASE,framework=__SCREAMING_SNAKE_CASE )
        # We need to order the inputs in the way they appear in the forward()
__lowerCAmelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCAmelCase = seqlen + 2
__lowerCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowerCAmelCase = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
__lowerCAmelCase = common_inputs["""attention_mask"""]
if self.use_past:
__lowerCAmelCase = ordered_inputs["""attention_mask"""].dtype
__lowerCAmelCase = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,dtype=__SCREAMING_SNAKE_CASE )],dim=1 )
return ordered_inputs
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 13
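# Hedged illustration of the attention-mask widening performed in generate_dummy_inputs above:
# when past key values are in use, the dummy mask is extended with ones so that it covers
# past_sequence + sequence positions. The sizes below are illustrative only.
if __name__ == "__main__":
    import torch
    _batch, _seqlen, _past_len = 2, 3, 5
    _mask = torch.ones(_batch, _seqlen, dtype=torch.int64)
    _extended = torch.cat([_mask, torch.ones(_batch, _past_len, dtype=_mask.dtype)], dim=1)
    assert _extended.shape == (_batch, _seqlen + _past_len)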
| 689 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ) -> Union[str, Any]:
__lowerCAmelCase = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
__lowerCAmelCase = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(lowercase )
# Let's go
__lowerCAmelCase = parser.parse_args()
if not hasattr(lowercase , """func""" ):
parser.print_help()
exit(1 )
# Run
__lowerCAmelCase = args.func(lowercase )
service.run()
if __name__ == "__main__":
main()
| 689 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : Optional[int] =TextToVideoSDPipeline
a : Optional[int] =TEXT_TO_IMAGE_PARAMS
a : Any =TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
a : Union[str, Any] =frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""),up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""),cross_attention_dim=32,attention_head_dim=4,)
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,)
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,sample_size=1_28,)
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,hidden_act="""gelu""",projection_dim=5_12,)
__lowerCAmelCase = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = TextToVideoSDPipeline(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """np"""
__lowerCAmelCase = sd_pipe(**__SCREAMING_SNAKE_CASE ).frames
__lowerCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
__lowerCAmelCase = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",)
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
__lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowerCAmelCase = pipe.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=25,output_type="""pt""" ).frames
__lowerCAmelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
__lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__lowerCAmelCase = pipe.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=2,output_type="""pt""" ).frames
__lowerCAmelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 689 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)
_a : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
_a : Any = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
for attribute in key.split(""".""" ):
__lowerCAmelCase = getattr(lowercase , lowercase )
if weight_type is not None:
__lowerCAmelCase = getattr(lowercase , lowercase ).shape
else:
__lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
__lowerCAmelCase = value
elif weight_type == "weight_g":
__lowerCAmelCase = value
elif weight_type == "weight_v":
__lowerCAmelCase = value
elif weight_type == "bias":
__lowerCAmelCase = value
else:
__lowerCAmelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]:
__lowerCAmelCase = []
__lowerCAmelCase = fairseq_model.state_dict()
__lowerCAmelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCAmelCase = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key):
# special case since naming is very similar
continue
__lowerCAmelCase = True
if "*" in mapped_key:
__lowerCAmelCase = name.split(lowercase )[0].split(""".""" )[-2]
__lowerCAmelCase = mapped_key.replace("""*""" , lowercase )
if "weight_g" in name:
__lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
__lowerCAmelCase = """weight_v"""
elif "bias" in name:
__lowerCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase = """weight"""
else:
__lowerCAmelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'Unused weights: {unused_weights}' )
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
__lowerCAmelCase = name.split(""".""" )
__lowerCAmelCase = int(items[0] )
__lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Dict:
if config_path is not None:
__lowerCAmelCase = UniSpeechSatConfig.from_pretrained(lowercase )
else:
__lowerCAmelCase = UniSpeechSatConfig()
__lowerCAmelCase = """"""
if is_finetuned:
__lowerCAmelCase = UniSpeechSatForCTC(lowercase )
else:
__lowerCAmelCase = UniSpeechSatForPreTraining(lowercase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
__lowerCAmelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_a : List[str] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_a : Union[str, Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 689 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase = 100_0000 ) -> int:
__lowerCAmelCase = set(range(3 , lowercase , 2 ) )
primes.add(2 )
for p in range(3 , lowercase , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , lowercase , lowercase ) ) )
__lowerCAmelCase = [float(lowercase ) for n in range(limit + 1 )]
for p in primes:
for n in range(lowercase , limit + 1 , lowercase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'{solution() = }')
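# Hedged brute-force cross-check of the totient sieve above on a small limit: summing phi(n) for
# 2 <= n <= 8 by directly counting residues coprime to n gives 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21.
from math import gcd
def _demo_totient_sum(limit):
    return sum(sum(1 for k in range(1, n) if gcd(n, k) == 1) for n in range(2, limit + 1))
assert _demo_totient_sum(8) == 21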
| 689 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_a : str = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_a : Dict = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_a : List[str] = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ),reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = spearmanr(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 689 | 1 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(lowercase , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def _lowerCAmelCase ( lowercase , lowercase ) -> Optional[int]:
__lowerCAmelCase = _distribute_shards(**lowercase )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> Dict:
__lowerCAmelCase = _split_gen_kwargs(lowercase , lowercase )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def _lowerCAmelCase ( lowercase , lowercase ) -> int:
if expected is RuntimeError:
with pytest.raises(lowercase ):
_number_of_shards_in_gen_kwargs(lowercase )
else:
__lowerCAmelCase = _number_of_shards_in_gen_kwargs(lowercase )
assert out == expected
| 689 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ):
a : List[str] =["""onnx"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""onnx"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""onnx"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""onnx"""] )
| 689 | 1 |
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
_a : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=7_68 ):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = proj_size
__lowerCAmelCase = CLIPVisionModel(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = PaintByExampleMapper(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = nn.LayerNorm(config.hidden_size )
__lowerCAmelCase = nn.Linear(config.hidden_size,self.proj_size )
# uncondition for scaling
__lowerCAmelCase = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = self.model(pixel_values=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = clip_output.pooler_output
__lowerCAmelCase = self.mapper(latent_states[:, None] )
__lowerCAmelCase = self.final_layer_norm(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.proj_out(__SCREAMING_SNAKE_CASE )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = (config.num_hidden_layers + 1) // 5
__lowerCAmelCase = config.hidden_size
__lowerCAmelCase = 1
__lowerCAmelCase = nn.ModuleList(
[
BasicTransformerBlock(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,activation_fn="""gelu""",attention_bias=__SCREAMING_SNAKE_CASE )
for _ in range(__SCREAMING_SNAKE_CASE )
] )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
for block in self.blocks:
__lowerCAmelCase = block(__SCREAMING_SNAKE_CASE )
return hidden_states
| 689 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : Optional[int] = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[str] ="""gptj"""
a : Optional[int] ={
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self,__SCREAMING_SNAKE_CASE=5_04_00,__SCREAMING_SNAKE_CASE=20_48,__SCREAMING_SNAKE_CASE=40_96,__SCREAMING_SNAKE_CASE=28,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="gelu_new",__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=False,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = vocab_size
__lowerCAmelCase = n_positions
__lowerCAmelCase = n_embd
__lowerCAmelCase = n_layer
__lowerCAmelCase = n_head
__lowerCAmelCase = n_inner
__lowerCAmelCase = rotary_dim
__lowerCAmelCase = activation_function
__lowerCAmelCase = resid_pdrop
__lowerCAmelCase = embd_pdrop
__lowerCAmelCase = attn_pdrop
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_range
__lowerCAmelCase = use_cache
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
super().__init__(
bos_token_id=__SCREAMING_SNAKE_CASE,eos_token_id=__SCREAMING_SNAKE_CASE,tie_word_embeddings=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = "default",__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE,task=__SCREAMING_SNAKE_CASE,patching_specs=__SCREAMING_SNAKE_CASE,use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config,"""pad_token_id""",__SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
__lowerCAmelCase = 0
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE,direction="""inputs""" )
__lowerCAmelCase = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._config.n_layer
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._config.n_head
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,):
'''simple docstring'''
__lowerCAmelCase = super(__SCREAMING_SNAKE_CASE,self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE,batch_size=__SCREAMING_SNAKE_CASE,seq_length=__SCREAMING_SNAKE_CASE,is_pair=__SCREAMING_SNAKE_CASE,framework=__SCREAMING_SNAKE_CASE )
        # We need to order the inputs in the way they appear in the forward()
__lowerCAmelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCAmelCase = seqlen + 2
__lowerCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowerCAmelCase = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
__lowerCAmelCase = common_inputs["""attention_mask"""]
if self.use_past:
__lowerCAmelCase = ordered_inputs["""attention_mask"""].dtype
__lowerCAmelCase = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,dtype=__SCREAMING_SNAKE_CASE )],dim=1 )
return ordered_inputs
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 13
| 689 | 1 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Union[str, Any] =["""image_processor"""]
a : Dict ="""SamImageProcessor"""
def __init__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.image_processor
__lowerCAmelCase = -10
__lowerCAmelCase = self.image_processor.size["""longest_edge"""]
def __call__( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = self.image_processor(
__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
        # pop arguments that are not used in the forward but used nevertheless
__lowerCAmelCase = encoding_image_processor["""original_sizes"""]
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks if Torch or TF tensor
__lowerCAmelCase = original_sizes.numpy()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._check_and_preprocess_points(
input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = self._normalize_and_convert(
__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,)
return encoding_image_processor
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="pt",):
'''simple docstring'''
if input_points is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0] ) for point in input_points
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
for point, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__lowerCAmelCase , __lowerCAmelCase = self._pad_points_and_labels(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_labels is not None:
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0],is_bounding_box=__SCREAMING_SNAKE_CASE )
for box in input_boxes
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,is_bounding_box=__SCREAMING_SNAKE_CASE )
for box, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
]
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
__lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = max([point.shape[0] for point in input_points] )
__lowerCAmelCase = []
for i, point in enumerate(__SCREAMING_SNAKE_CASE ):
if point.shape[0] != expected_nb_points:
__lowerCAmelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 )
__lowerCAmelCase = np.append(input_labels[i],[self.point_pad_value] )
processed_input_points.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = processed_input_points
return input_points, input_labels
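    # The helper below rescales point/box coordinates from the original image size to the resized model input size (longest edge = self.target_size).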
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = original_size
__lowerCAmelCase , __lowerCAmelCase = self.image_processor._get_preprocess_shape(__SCREAMING_SNAKE_CASE,longest_edge=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = deepcopy(__SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE )
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1,2,2 )
__lowerCAmelCase = coords[..., 0] * (new_w / old_w)
__lowerCAmelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1,4 )
return coords
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,):
'''simple docstring'''
if input_points is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks for TF or Torch tensor
__lowerCAmelCase = input_points.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0],__SCREAMING_SNAKE_CASE ):
raise ValueError("""Input points must be a list of list of floating points.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for input_point in input_points]
else:
__lowerCAmelCase = None
if input_labels is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
__lowerCAmelCase = input_labels.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0],__SCREAMING_SNAKE_CASE ):
raise ValueError("""Input labels must be a list of list integers.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for label in input_labels]
else:
__lowerCAmelCase = None
if input_boxes is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
__lowerCAmelCase = input_boxes.numpy().tolist()
if (
not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0],__SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0][0],__SCREAMING_SNAKE_CASE )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) for box in input_boxes]
else:
__lowerCAmelCase = None
return input_points, input_labels, input_boxes
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(__SCREAMING_SNAKE_CASE ) )
def lowerCamelCase__ ( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.image_processor.post_process_masks(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
| 689 |
'''simple docstring'''
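# A sketch of what the function below computes (consistent with a Project Euler 87-style search):
# the count of numbers under the given limit expressible as a prime square plus a prime cube plus
# a prime fourth power.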
def _lowerCAmelCase ( lowercase = 5000_0000 ) -> int:
__lowerCAmelCase = set()
__lowerCAmelCase = int((limit - 24) ** (1 / 2) )
__lowerCAmelCase = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , lowercase ) ) )
for primea in primes:
__lowerCAmelCase = primea * primea
for primea in primes:
__lowerCAmelCase = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
__lowerCAmelCase = primea * primea * primea * primea
__lowerCAmelCase = square + cube + tetr
if total >= limit:
break
ret.add(lowercase )
return len(lowercase )
if __name__ == "__main__":
print(f'{solution() = }')
| 689 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def _lowerCAmelCase ( lowercase , lowercase ) -> float:
__lowerCAmelCase = u
for i in range(1 , lowercase ):
__lowerCAmelCase = temp * (u - i)
return temp
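# The helper above returns u * (u - 1) * ... * (u - n + 1), the coefficient factor used in
# Newton's forward-difference interpolation performed below.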
def _lowerCAmelCase ( ) -> None:
    __lowerCAmelCase = int(input("""enter the number of values: """ ) )
__lowerCAmelCase = []
for _ in range(lowercase ):
y.append([] )
for i in range(lowercase ):
for j in range(lowercase ):
y[i].append(lowercase )
__lowerCAmelCase = 0
print("""enter the values of parameters in a list: """ )
__lowerCAmelCase = list(map(lowercase , input().split() ) )
print("""enter the values of corresponding parameters: """ )
for i in range(lowercase ):
__lowerCAmelCase = float(input() )
__lowerCAmelCase = int(input("""enter the value to interpolate: """ ) )
__lowerCAmelCase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , lowercase ):
for j in range(n - i ):
__lowerCAmelCase = y[j + 1][i - 1] - y[j][i - 1]
__lowerCAmelCase = y[0][0]
for i in range(1 , lowercase ):
summ += (ucal(lowercase , lowercase ) * y[0][i]) / math.factorial(lowercase )
print(f'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
| 689 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : Optional[int] =TextToVideoSDPipeline
a : Optional[int] =TEXT_TO_IMAGE_PARAMS
a : Any =TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
a : Union[str, Any] =frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""),up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""),cross_attention_dim=32,attention_head_dim=4,)
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,)
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,sample_size=1_28,)
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,hidden_act="""gelu""",projection_dim=5_12,)
__lowerCAmelCase = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = TextToVideoSDPipeline(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """np"""
__lowerCAmelCase = sd_pipe(**__SCREAMING_SNAKE_CASE ).frames
__lowerCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
__lowerCAmelCase = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",)
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
__lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowerCAmelCase = pipe.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=25,output_type="""pt""" ).frames
__lowerCAmelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
__lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__lowerCAmelCase = pipe.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=2,output_type="""pt""" ).frames
__lowerCAmelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 689 | 1 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_a : List[str] = """true"""
def _lowerCAmelCase ( lowercase , lowercase=82 , lowercase=16 ) -> List[Any]:
set_seed(42 )
__lowerCAmelCase = RegressionModel()
__lowerCAmelCase = deepcopy(lowercase )
__lowerCAmelCase = RegressionDataset(length=lowercase )
__lowerCAmelCase = DataLoader(lowercase , batch_size=lowercase )
model.to(accelerator.device )
__lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowercase , lowercase )
return model, ddp_model, dataloader
def _lowerCAmelCase ( lowercase , lowercase=False ) -> Optional[int]:
__lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
__lowerCAmelCase = load_dataset("""glue""" , """mrpc""" , split="""validation""" )
def tokenize_function(lowercase ):
__lowerCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase )
return outputs
with accelerator.main_process_first():
__lowerCAmelCase = dataset.map(
lowercase , batched=lowercase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
__lowerCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase ):
if use_longest:
return tokenizer.pad(lowercase , padding="""longest""" , return_tensors="""pt""" )
return tokenizer.pad(lowercase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return DataLoader(lowercase , shuffle=lowercase , collate_fn=lowercase , batch_size=16 )
def _lowerCAmelCase ( lowercase , lowercase ) -> List[str]:
__lowerCAmelCase = Accelerator(dispatch_batches=lowercase , split_batches=lowercase )
__lowerCAmelCase = get_dataloader(lowercase , not dispatch_batches )
__lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" , return_dict=lowercase )
__lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowercase , lowercase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> Tuple:
__lowerCAmelCase = []
for batch in dataloader:
__lowerCAmelCase , __lowerCAmelCase = batch.values()
with torch.no_grad():
__lowerCAmelCase = model(lowercase )
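        # gather_for_metrics collects the predictions/targets from every process and drops the duplicate samples that were added to make the shards even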
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__lowerCAmelCase , __lowerCAmelCase = [], []
for logit, targ in logits_and_targets:
logits.append(lowercase )
targs.append(lowercase )
__lowerCAmelCase , __lowerCAmelCase = torch.cat(lowercase ), torch.cat(lowercase )
return logits, targs
def _lowerCAmelCase ( lowercase , lowercase=82 , lowercase=False , lowercase=False , lowercase=16 ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = get_basic_setup(lowercase , lowercase , lowercase )
__lowerCAmelCase , __lowerCAmelCase = generate_predictions(lowercase , lowercase , lowercase )
assert (
len(lowercase ) == num_samples
), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowercase )}'
def _lowerCAmelCase ( lowercase = False , lowercase = False ) -> int:
__lowerCAmelCase = evaluate.load("""glue""" , """mrpc""" )
__lowerCAmelCase , __lowerCAmelCase = get_mrpc_setup(lowercase , lowercase )
# First do baseline
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup["""no"""]
model.to(lowercase )
model.eval()
for batch in dataloader:
batch.to(lowercase )
with torch.inference_mode():
__lowerCAmelCase = model(**lowercase )
__lowerCAmelCase = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowercase , references=batch["""labels"""] )
__lowerCAmelCase = metric.compute()
# Then do distributed
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
__lowerCAmelCase = model(**lowercase )
__lowerCAmelCase = outputs.logits.argmax(dim=-1 )
__lowerCAmelCase = batch["""labels"""]
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowercase , references=lowercase )
__lowerCAmelCase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def _lowerCAmelCase ( ) -> str:
__lowerCAmelCase = Accelerator(split_batches=lowercase , dispatch_batches=lowercase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
test_mrpc(lowercase , lowercase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__lowerCAmelCase = Accelerator(split_batches=lowercase , dispatch_batches=lowercase )
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
test_torch_metrics(lowercase , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
__lowerCAmelCase = Accelerator()
test_torch_metrics(lowercase , 512 )
accelerator.state._reset_state()
def _lowerCAmelCase ( lowercase ) -> str:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 689 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _lowerCAmelCase ( lowercase ) -> Optional[int]:
if not is_accelerate_available():
return method
__lowerCAmelCase = version.parse(accelerate.__version__ ).base_version
if version.parse(lowercase ) < version.parse("""0.17.0""" ):
return method
def wrapper(self , *lowercase , **lowercase ):
if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
self._hf_hook.pre_forward(self )
return method(self , *lowercase , **lowercase )
return wrapper
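# A minimal usage sketch (hypothetical module, not part of this file): the wrapper above acts as a
# decorator for methods of modules that may be offloaded through accelerate hooks, e.g.
#
#     class TinyAutoencoder(torch.nn.Module):
#         @_lowerCAmelCase  # the decorator defined above
#         def encode(self, x):
#             return x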
| 689 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=13,__SCREAMING_SNAKE_CASE=7,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=99,__SCREAMING_SNAKE_CASE=[1, 1, 2],__SCREAMING_SNAKE_CASE=1,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE=4,__SCREAMING_SNAKE_CASE=8,__SCREAMING_SNAKE_CASE=37,__SCREAMING_SNAKE_CASE="gelu_new",__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=5_12,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=4,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=False,):
'''simple docstring'''
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = block_sizes
__lowerCAmelCase = num_decoder_layers
__lowerCAmelCase = d_model
__lowerCAmelCase = n_head
__lowerCAmelCase = d_head
__lowerCAmelCase = d_inner
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = 2
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
__lowerCAmelCase = initializer_std
# Used in the tests to check the size of the first attention layer
__lowerCAmelCase = n_head
# Used in the tests to check the size of the first hidden state
__lowerCAmelCase = self.d_model
# Used in the tests to check the number of output hidden states/attentions
__lowerCAmelCase = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
__lowerCAmelCase = self.num_hidden_layers + 2
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size],self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size],self.num_choices )
__lowerCAmelCase = FunnelConfig(
vocab_size=self.vocab_size,block_sizes=self.block_sizes,num_decoder_layers=self.num_decoder_layers,d_model=self.d_model,n_head=self.n_head,d_head=self.d_head,d_inner=self.d_inner,hidden_act=self.hidden_act,hidden_dropout=self.hidden_dropout,attention_dropout=self.attention_dropout,activation_dropout=self.activation_dropout,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_std=self.initializer_std,)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = TFFunnelModel(config=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [input_ids, input_mask]
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.d_model) )
__lowerCAmelCase = False
__lowerCAmelCase = TFFunnelModel(config=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.d_model) )
__lowerCAmelCase = False
__lowerCAmelCase = TFFunnelModel(config=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.d_model) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = TFFunnelBaseModel(config=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [input_ids, input_mask]
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, 2, self.d_model) )
__lowerCAmelCase = False
__lowerCAmelCase = TFFunnelBaseModel(config=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, 3, self.d_model) )
__lowerCAmelCase = False
__lowerCAmelCase = TFFunnelBaseModel(config=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, 2, self.d_model) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = TFFunnelForPreTraining(config=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = TFFunnelForMaskedLM(config=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = TFFunnelForSequenceClassification(config=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = self.num_choices
__lowerCAmelCase = TFFunnelForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ),(1, self.num_choices, 1) )
__lowerCAmelCase = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ),(1, self.num_choices, 1) )
__lowerCAmelCase = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ),(1, self.num_choices, 1) )
__lowerCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = TFFunnelForTokenClassification(config=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = TFFunnelForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = config_and_inputs
__lowerCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
a : str =(
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
a : Union[str, Any] =(
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
a : Optional[int] =False
a : Dict =False
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = TFFunnelModelTester(self )
__lowerCAmelCase = ConfigTester(self,config_class=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )
@require_tf
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : List[Any] =(
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
a : List[Any] =False
a : Tuple =False
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = TFFunnelModelTester(self,base=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = ConfigTester(self,config_class=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
| 689 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
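# A reading of the script below (a sketch of intent, not authoritative): it merges LoRA weights
# stored in a .safetensors file directly into a base Stable Diffusion pipeline, updating each
# matching layer as W <- W + alpha * (lora_up @ lora_down), which matches the --alpha help text
# "W = W0 + alpha * deltaW" further down.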
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
# load base model
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained(lowercase , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
__lowerCAmelCase = load_file(lowercase )
__lowerCAmelCase = []
# directly update weight in diffusers model
for key in state_dict:
        # It helps to print out the key; it usually looks like
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight".
        # Since alpha is set beforehand (via the --alpha argument), ".alpha" keys and already-visited keys are skipped.
if ".alpha" in key or key in visited:
continue
if "text" in key:
__lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
__lowerCAmelCase = pipeline.text_encoder
else:
__lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
__lowerCAmelCase = pipeline.unet
# find the target layer
__lowerCAmelCase = layer_infos.pop(0 )
while len(lowercase ) > -1:
try:
__lowerCAmelCase = curr_layer.__getattr__(lowercase )
if len(lowercase ) > 0:
__lowerCAmelCase = layer_infos.pop(0 )
elif len(lowercase ) == 0:
break
except Exception:
if len(lowercase ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
__lowerCAmelCase = layer_infos.pop(0 )
__lowerCAmelCase = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(lowercase )
else:
pair_keys.append(lowercase )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
__lowerCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
__lowerCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase ).unsqueeze(2 ).unsqueeze(3 )
else:
__lowerCAmelCase = state_dict[pair_keys[0]].to(torch.floataa )
__lowerCAmelCase = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase )
# update visited list
for item in pair_keys:
visited.append(lowercase )
return pipeline
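# Hypothetical invocation (script name and paths are placeholders; flags are taken from the
# argparse definitions below):
#   python convert_lora_safetensors.py --base_model_path <diffusers-model-dir> \
#       --checkpoint_path <lora.safetensors> --dump_path <output-dir> --alpha 0.75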
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
_a : Optional[int] = parser.parse_args()
_a : Dict = args.base_model_path
_a : Optional[Any] = args.checkpoint_path
_a : Union[str, Any] = args.dump_path
_a : Optional[int] = args.lora_prefix_unet
_a : int = args.lora_prefix_text_encoder
_a : str = args.alpha
_a : Any = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
_a : Tuple = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 689 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDModel(
sample_size=(32, 64),in_channels=1,out_channels=1,layers_per_block=2,block_out_channels=(1_28, 1_28),down_block_types=("""AttnDownBlock2D""", """DownBlock2D"""),up_block_types=("""UpBlock2D""", """AttnUpBlock2D"""),)
return model
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
sample_size=(64, 32),in_channels=1,out_channels=1,layers_per_block=2,block_out_channels=(1_28, 1_28),down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D"""),up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D"""),cross_attention_dim=10,)
return model
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
sample_size=(1_28, 64),in_channels=1,out_channels=1,latent_channels=1,layers_per_block=2,block_out_channels=(1_28, 1_28),down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D"""),up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D"""),)
__lowerCAmelCase = UNetaDModel(
sample_size=(64, 32),in_channels=1,out_channels=1,layers_per_block=2,block_out_channels=(1_28, 1_28),down_block_types=("""AttnDownBlock2D""", """DownBlock2D"""),up_block_types=("""UpBlock2D""", """AttnUpBlock2D"""),)
return vqvae, unet
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = Mel(
x_res=self.dummy_unet.config.sample_size[1],y_res=self.dummy_unet.config.sample_size[0],)
__lowerCAmelCase = DDPMScheduler()
__lowerCAmelCase = AudioDiffusionPipeline(vqvae=__SCREAMING_SNAKE_CASE,unet=self.dummy_unet,mel=__SCREAMING_SNAKE_CASE,scheduler=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
__lowerCAmelCase = pipe(generator=__SCREAMING_SNAKE_CASE,steps=4 )
__lowerCAmelCase = output.audios[0]
__lowerCAmelCase = output.images[0]
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
__lowerCAmelCase = pipe(generator=__SCREAMING_SNAKE_CASE,steps=4,return_dict=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__lowerCAmelCase = np.frombuffer(image.tobytes(),dtype="""uint8""" )[:10]
__lowerCAmelCase = np.frombuffer(image_from_tuple.tobytes(),dtype="""uint8""" )[:10]
__lowerCAmelCase = np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__lowerCAmelCase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],)
__lowerCAmelCase = DDIMScheduler()
__lowerCAmelCase = self.dummy_vqvae_and_unet
__lowerCAmelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0],unet=dummy_vqvae_and_unet[1],mel=__SCREAMING_SNAKE_CASE,scheduler=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
__lowerCAmelCase = np.random.uniform(-1,1,((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
__lowerCAmelCase = pipe(raw_audio=__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,start_step=5,steps=10 )
__lowerCAmelCase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__lowerCAmelCase = np.frombuffer(image.tobytes(),dtype="""uint8""" )[:10]
__lowerCAmelCase = np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__lowerCAmelCase = self.dummy_unet_condition
__lowerCAmelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0],unet=__SCREAMING_SNAKE_CASE,mel=__SCREAMING_SNAKE_CASE,scheduler=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
__lowerCAmelCase = torch.rand((1, 1, 10) )
__lowerCAmelCase = pipe(generator=__SCREAMING_SNAKE_CASE,encoding=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = output.images[0]
__lowerCAmelCase = np.frombuffer(image.tobytes(),dtype="""uint8""" )[:10]
__lowerCAmelCase = np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = torch_device
__lowerCAmelCase = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
__lowerCAmelCase = pipe(generator=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = output.audios[0]
__lowerCAmelCase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__lowerCAmelCase = np.frombuffer(image.tobytes(),dtype="""uint8""" )[:10]
__lowerCAmelCase = np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 689 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def _lowerCAmelCase ( lowercase = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
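# A string can be rearranged into a palindrome iff at most one character has an odd count,
# e.g. "racecar" (only "e" has an odd count) -> True, while "abc" (three odd counts) -> False.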
def _lowerCAmelCase ( lowercase = "" ) -> bool:
if len(lowercase ) == 0:
return True
__lowerCAmelCase = input_str.replace(""" """ , """""" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__lowerCAmelCase = {}
for character in lower_case_input_str:
__lowerCAmelCase = character_freq_dict.get(lowercase , 0 ) + 1
__lowerCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _lowerCAmelCase ( lowercase = "" ) -> None:
print("""\nFor string = """ , lowercase , """:""" )
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
_a : int = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
_a : Optional[int] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 689 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : Tuple =RoCBertTokenizer
a : Dict =None
a : List[Any] =False
a : Any =True
a : Optional[int] =filter_non_english
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().setUp()
__lowerCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
__lowerCAmelCase = {}
__lowerCAmelCase = {}
for i, value in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = i
__lowerCAmelCase = i
__lowerCAmelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCAmelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["""word_shape_file"""] )
__lowerCAmelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["""word_pronunciation_file"""] )
with open(self.vocab_file,"""w""",encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.word_shape_file,"""w""",encoding="""utf-8""" ) as word_shape_writer:
json.dump(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,ensure_ascii=__SCREAMING_SNAKE_CASE )
with open(self.word_pronunciation_file,"""w""",encoding="""utf-8""" ) as word_pronunciation_writer:
json.dump(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,ensure_ascii=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.tokenizer_class(self.vocab_file,self.word_shape_file,self.word_pronunciation_file )
__lowerCAmelCase = tokenizer.tokenize("""你好[SEP]你是谁""" )
self.assertListEqual(__SCREAMING_SNAKE_CASE,["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ),[5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__SCREAMING_SNAKE_CASE ),[5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__SCREAMING_SNAKE_CASE ),[5, 6, 2, 5, 7, 8] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ),["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ),["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ),["""hello"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE,strip_accents=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ),["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ),["""h\u00E9llo"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE,strip_accents=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ),["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ),["""hello"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ),["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ),["""hello"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ),["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE,strip_accents=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ),["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE,strip_accents=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ),["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE,never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ),["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__lowerCAmelCase = {}
for i, token in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = i
__lowerCAmelCase = RoCBertWordpieceTokenizer(vocab=__SCREAMING_SNAKE_CASE,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ),[] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ),["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ),["""[UNK]""", """runn""", """##ing"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) for t in ["""Test""", """\xad""", """test"""]],[["""[UNK]"""], [], ["""[UNK]"""]] )
if self.test_rust_tokenizer:
__lowerCAmelCase = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) for t in ["""Test""", """\xad""", """test"""]],[["""[UNK]"""], [], ["""[UNK]"""]] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__lowerCAmelCase = tokenizer_r.encode_plus(
__SCREAMING_SNAKE_CASE,return_attention_mask=__SCREAMING_SNAKE_CASE,return_token_type_ids=__SCREAMING_SNAKE_CASE,return_offsets_mapping=__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = tokenizer_r.do_lower_case if hasattr(__SCREAMING_SNAKE_CASE,"""do_lower_case""" ) else False
__lowerCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results],tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results],tokens["""offset_mapping"""] )
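    # The next test toggles Chinese-character handling: when enabled, each Chinese character becomes its
    # own token (no "##" prefix); when disabled, only the first character of a run stays unprefixed and the
    # remaining characters are emitted as "##" subwords.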
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ["""的""", """人""", """有"""]
__lowerCAmelCase = """""".join(__SCREAMING_SNAKE_CASE )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCAmelCase = True
__lowerCAmelCase = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer_p.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer_r.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = False
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer_r.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer_p.encode(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
# it is expected that only the first Chinese character is not preceded by "##".
__lowerCAmelCase = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(__SCREAMING_SNAKE_CASE )
]
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.tokenizer_class(self.vocab_file,self.word_shape_file,self.word_pronunciation_file )
__lowerCAmelCase = tokenizer.encode("""你好""",add_special_tokens=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.encode("""你是谁""",add_special_tokens=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.get_tokenizers(do_lower_case=__SCREAMING_SNAKE_CASE )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__lowerCAmelCase = """你好,你是谁"""
__lowerCAmelCase = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.convert_tokens_to_shape_ids(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.convert_tokens_to_pronunciation_ids(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.prepare_for_model(
__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.encode_plus(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
| 689 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
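# Builds a VideoMAEConfig from the model name: fills the architecture sizes via set_architecture_configs and,
# for fine-tuned checkpoints, attaches the Kinetics-400 or Something-Something-v2 id2label mappings.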
def _lowerCAmelCase ( lowercase ) -> List[Any]:
__lowerCAmelCase = VideoMAEConfig()
set_architecture_configs(lowercase , lowercase )
if "finetuned" not in model_name:
__lowerCAmelCase = False
if "finetuned" in model_name:
__lowerCAmelCase = """huggingface/label-files"""
if "kinetics" in model_name:
__lowerCAmelCase = 400
__lowerCAmelCase = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
__lowerCAmelCase = 174
__lowerCAmelCase = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
__lowerCAmelCase = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) )
__lowerCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
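# Sets size-dependent hyper-parameters (hidden sizes, layer counts, decoder dimensions) according to whether
# the model name contains "small", "large" or "huge"; "base" keeps the VideoMAEConfig defaults.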
def _lowerCAmelCase ( lowercase , lowercase ) -> Any:
if "small" in model_name:
__lowerCAmelCase = 384
__lowerCAmelCase = 1536
__lowerCAmelCase = 12
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 3
__lowerCAmelCase = 192
__lowerCAmelCase = 768
elif "large" in model_name:
__lowerCAmelCase = 1024
__lowerCAmelCase = 4096
__lowerCAmelCase = 24
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 8
__lowerCAmelCase = 512
__lowerCAmelCase = 2048
elif "huge" in model_name:
__lowerCAmelCase = 1280
__lowerCAmelCase = 5120
__lowerCAmelCase = 32
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 8
__lowerCAmelCase = 640
__lowerCAmelCase = 2560
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def _lowerCAmelCase ( lowercase ) -> List[str]:
if "encoder." in name:
__lowerCAmelCase = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
__lowerCAmelCase = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
__lowerCAmelCase = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
__lowerCAmelCase = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowerCAmelCase = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__lowerCAmelCase = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
__lowerCAmelCase = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
__lowerCAmelCase = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
__lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
__lowerCAmelCase = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
__lowerCAmelCase = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
__lowerCAmelCase = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
__lowerCAmelCase = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
__lowerCAmelCase = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__lowerCAmelCase = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__lowerCAmelCase = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
__lowerCAmelCase = name.replace("""head""" , """classifier""" )
return name
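# Renames every checkpoint key and splits the fused qkv projection weights/biases into separate
# query/key/value tensors for both the encoder and the decoder layers.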
def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]:
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(lowercase )
if key.startswith("""encoder.""" ):
__lowerCAmelCase = key.replace("""encoder.""" , """""" )
if "qkv" in key:
__lowerCAmelCase = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
__lowerCAmelCase = config.decoder_hidden_size
__lowerCAmelCase = int(key_split[2] )
__lowerCAmelCase = """decoder.decoder_layers."""
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = config.hidden_size
__lowerCAmelCase = int(key_split[1] )
__lowerCAmelCase = """videomae.encoder.layer."""
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val
return orig_state_dict
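# Downloads a short sample clip from the hub and returns it as a list of frames for sanity-checking the model.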
def _lowerCAmelCase ( ) -> str:
__lowerCAmelCase = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__lowerCAmelCase = np.load(lowercase )
return list(lowercase )
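# End-to-end conversion: build the config, download the original checkpoint from Google Drive, remap the
# state dict, verify the logits (and the reconstruction loss for the pretraining checkpoint), then optionally
# save the model locally and push it to the hub.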
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = get_videomae_config(lowercase )
if "finetuned" in model_name:
__lowerCAmelCase = VideoMAEForVideoClassification(lowercase )
else:
__lowerCAmelCase = VideoMAEForPreTraining(lowercase )
# download original checkpoint, hosted on Google Drive
__lowerCAmelCase = """pytorch_model.bin"""
gdown.cached_download(lowercase , lowercase , quiet=lowercase )
__lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )
if "model" in files:
__lowerCAmelCase = files["""model"""]
else:
__lowerCAmelCase = files["""module"""]
__lowerCAmelCase = convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
# verify model on basic input
__lowerCAmelCase = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__lowerCAmelCase = prepare_video()
__lowerCAmelCase = image_processor(lowercase , return_tensors="""pt""" )
if "finetuned" not in model_name:
__lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
__lowerCAmelCase = torch.load(lowercase )
__lowerCAmelCase = model(**lowercase )
__lowerCAmelCase = outputs.logits
__lowerCAmelCase = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
__lowerCAmelCase = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(f'Model name not supported. Should be one of {model_names}' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , lowercase , atol=1e-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__lowerCAmelCase = outputs.loss
assert torch.allclose(lowercase , lowercase , atol=1e-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase )
model.save_pretrained(lowercase )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase , organization="""nielsr""" )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_a : int = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 689 | 1 |
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
_a : Optional[int] = logging.get_logger(__name__)
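# Copies weights from a TensorFlow 2.x BERT checkpoint (tf.train format) into a PyTorch BertModel, walking
# the checkpoint variable names and resolving each one to the matching PyTorch submodule.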
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = os.path.abspath(lowercase )
logger.info(f'Converting TensorFlow checkpoint from {tf_path}' )
# Load weights from TF model
__lowerCAmelCase = tf.train.list_variables(lowercase )
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
__lowerCAmelCase = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'Skipping non-model layer {full_name}' )
continue
if "optimizer" in full_name:
logger.info(f'Skipping optimization layer {full_name}' )
continue
if name[0] == "model":
# ignore initial 'model'
__lowerCAmelCase = name[1:]
# figure out how many levels deep the name is
__lowerCAmelCase = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(lowercase )
# read data
__lowerCAmelCase = tf.train.load_variable(lowercase , lowercase )
names.append("""/""".join(lowercase ) )
arrays.append(lowercase )
logger.info(f'Read a total of {len(lowercase ):,} layers' )
# Sanity check
if len(set(lowercase ) ) != 1:
raise ValueError(f'Found layer names with different depths (layer depth {list(set(lowercase ) )})' )
__lowerCAmelCase = list(set(lowercase ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(lowercase , lowercase ):
__lowerCAmelCase = full_name.split("""/""" )
__lowerCAmelCase = model
__lowerCAmelCase = []
for i, m_name in enumerate(lowercase ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
__lowerCAmelCase = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
__lowerCAmelCase = getattr(lowercase , """embeddings""" )
__lowerCAmelCase = getattr(lowercase , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
__lowerCAmelCase = getattr(lowercase , """encoder""" )
__lowerCAmelCase = getattr(lowercase , """layer""" )
__lowerCAmelCase = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
__lowerCAmelCase = getattr(lowercase , """pooler""" )
__lowerCAmelCase = getattr(lowercase , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
__lowerCAmelCase = getattr(lowercase , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
__lowerCAmelCase = getattr(lowercase , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
__lowerCAmelCase = getattr(lowercase , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
__lowerCAmelCase = getattr(lowercase , """token_type_embeddings""" )
else:
raise ValueError(f'Unknown embedding layer with name {full_name}' )
trace.append("""weight""" )
__lowerCAmelCase = getattr(lowercase , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
__lowerCAmelCase = getattr(lowercase , """attention""" )
__lowerCAmelCase = getattr(lowercase , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
__lowerCAmelCase = getattr(lowercase , """attention""" )
__lowerCAmelCase = getattr(lowercase , """output""" )
__lowerCAmelCase = getattr(lowercase , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
__lowerCAmelCase = getattr(lowercase , """attention""" )
__lowerCAmelCase = getattr(lowercase , """output""" )
__lowerCAmelCase = getattr(lowercase , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
__lowerCAmelCase = getattr(lowercase , """output""" )
__lowerCAmelCase = getattr(lowercase , """dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
__lowerCAmelCase = getattr(lowercase , """output""" )
__lowerCAmelCase = getattr(lowercase , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
__lowerCAmelCase = getattr(lowercase , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
__lowerCAmelCase = getattr(lowercase , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
__lowerCAmelCase = getattr(lowercase , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
__lowerCAmelCase = getattr(lowercase , """intermediate""" )
__lowerCAmelCase = getattr(lowercase , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
__lowerCAmelCase = getattr(lowercase , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
__lowerCAmelCase = getattr(lowercase , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
__lowerCAmelCase = getattr(lowercase , """weight""" )
else:
logger.warning(f'Ignored {m_name}' )
# for certain layers reshape is necessary
__lowerCAmelCase = """.""".join(lowercase )
if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , lowercase ) or re.match(
R"""(\S+)\.attention\.output\.dense\.weight""" , lowercase ):
__lowerCAmelCase = array.reshape(pointer.data.shape )
if "kernel" in full_name:
__lowerCAmelCase = array.transpose()
if pointer.shape == array.shape:
__lowerCAmelCase = torch.from_numpy(lowercase )
else:
raise ValueError(
f'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
f' {array.shape}' )
logger.info(f'Successfully set variable {full_name} to PyTorch layer {trace}' )
return model
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> Any:
# Instantiate model
logger.info(f'Loading model based on config from {config_path}...' )
__lowerCAmelCase = BertConfig.from_json_file(lowercase )
__lowerCAmelCase = BertModel(lowercase )
# Load weights from checkpoint
logger.info(f'Loading weights from checkpoint {tf_checkpoint_path}...' )
load_tfa_weights_in_bert(lowercase , lowercase , lowercase )
# Save pytorch-model
logger.info(f'Saving PyTorch model to {pytorch_dump_path}...' )
torch.save(model.state_dict() , lowercase )
if __name__ == "__main__":
_a : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
_a : Optional[Any] = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 689 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_a : Tuple = """\
"""
_a : Tuple = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
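Concretely, for a tokenized sequence X = (x_1, ..., x_t),
PPL(X) = exp( -(1/t) * sum_{i=1}^{t} log p(x_i | x_{<i}) ),
so a lower perplexity means the model assigns higher probability to the text.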
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_a : Optional[Any] = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ),reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 16,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__lowerCAmelCase = """cuda"""
else:
__lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__SCREAMING_SNAKE_CASE ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__lowerCAmelCase = model.config.max_length - 1
else:
__lowerCAmelCase = model.config.max_length
__lowerCAmelCase = tokenizer(
__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,max_length=__SCREAMING_SNAKE_CASE,return_tensors="""pt""",return_attention_mask=__SCREAMING_SNAKE_CASE,).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = encodings["""input_ids"""]
__lowerCAmelCase = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ),1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ),2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__lowerCAmelCase = []
__lowerCAmelCase = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0,len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase = min(start_index + batch_size,len(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = encoded_texts[start_index:end_index]
__lowerCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
__lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch],dim=1 )
__lowerCAmelCase = torch.cat(
[torch.ones(bos_tokens_tensor.size(),dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask],dim=1 )
__lowerCAmelCase = encoded_batch
with torch.no_grad():
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE ).logits
__lowerCAmelCase = out_logits[..., :-1, :].contiguous()
__lowerCAmelCase = labels[..., 1:].contiguous()
__lowerCAmelCase = attn_mask[..., 1:].contiguous()
__lowerCAmelCase = torch.expa(
(loss_fct(shift_logits.transpose(1,2 ),__SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
| 689 | 1 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
_a : List[str] = logging.get_logger(__name__)
enable_full_determinism()
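# The three suites below exercise UNet2DModel variants: a small UNet with attention down/up blocks, an
# LDM-style 4-channel UNet loaded from "fusing/unet-ldm-dummy-update", and an NCSN++ (score-based) UNet
# that uses Fourier time embeddings and skip up/down blocks.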
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
a : List[str] =UNetaDModel
a : Optional[int] ="""sample"""
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = 4
__lowerCAmelCase = 3
__lowerCAmelCase = (32, 32)
__lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor([10] ).to(__SCREAMING_SNAKE_CASE )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return (3, 32, 32)
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
__lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
a : Union[str, Any] =UNetaDModel
a : int ="""sample"""
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = 4
__lowerCAmelCase = 4
__lowerCAmelCase = (32, 32)
__lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor([10] ).to(__SCREAMING_SNAKE_CASE )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return (4, 32, 32)
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return (4, 32, 32)
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
__lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""",output_loading_info=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
self.assertEqual(len(loading_info["""missing_keys"""] ),0 )
model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""","""This test is supposed to run on GPU""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""",output_loading_info=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""","""This test is supposed to run on GPU""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""",output_loading_info=__SCREAMING_SNAKE_CASE )
model_accelerate.to(__SCREAMING_SNAKE_CASE )
model_accelerate.eval()
__lowerCAmelCase = torch.randn(
1,model_accelerate.config.in_channels,model_accelerate.config.sample_size,model_accelerate.config.sample_size,generator=torch.manual_seed(0 ),)
__lowerCAmelCase = noise.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model_accelerate(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
__lowerCAmelCase , __lowerCAmelCase = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""",output_loading_info=__SCREAMING_SNAKE_CASE,low_cpu_mem_usage=__SCREAMING_SNAKE_CASE )
model_normal_load.to(__SCREAMING_SNAKE_CASE )
model_normal_load.eval()
__lowerCAmelCase = model_normal_load(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )["""sample"""]
assert torch_all_close(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,rtol=1e-3 )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.randn(
1,model.config.in_channels,model.config.sample_size,model.config.sample_size,generator=torch.manual_seed(0 ),)
__lowerCAmelCase = noise.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ).sample
__lowerCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
__lowerCAmelCase = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,rtol=1e-3 ) )
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
a : List[Any] =UNetaDModel
a : Optional[int] ="""sample"""
@property
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=(32, 32) ):
'''simple docstring'''
__lowerCAmelCase = 4
__lowerCAmelCase = 3
__lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa,device=__SCREAMING_SNAKE_CASE )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return (3, 32, 32)
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
__lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""",output_loading_info=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
self.assertEqual(len(loading_info["""missing_keys"""] ),0 )
model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.dummy_input
__lowerCAmelCase = floats_tensor((4, 3) + (2_56, 2_56) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = noise
__lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE )
assert image is not None, "Make sure output is not None"
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = 4
__lowerCAmelCase = 3
__lowerCAmelCase = (2_56, 2_56)
__lowerCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ).sample
__lowerCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
__lowerCAmelCase = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,rtol=1e-2 ) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = 4
__lowerCAmelCase = 3
__lowerCAmelCase = (32, 32)
__lowerCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ).sample
__lowerCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
__lowerCAmelCase = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,rtol=1e-2 ) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
| 689 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
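# Processor that wraps a SamImageProcessor: it preprocesses images and prepares optional point, label and
# bounding-box prompts (validation, rescaling to the model input size, padding, tensor conversion) for SAM.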
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Union[str, Any] =["""image_processor"""]
a : Dict ="""SamImageProcessor"""
def __init__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.image_processor
__lowerCAmelCase = -10
__lowerCAmelCase = self.image_processor.size["""longest_edge"""]
def __call__( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = self.image_processor(
__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
# pop arguments that are not used in the foward but used nevertheless
__lowerCAmelCase = encoding_image_processor["""original_sizes"""]
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks if Torch or TF tensor
__lowerCAmelCase = original_sizes.numpy()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._check_and_preprocess_points(
input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = self._normalize_and_convert(
__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,)
return encoding_image_processor
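    # Rescales point and box prompts from the original image coordinates to the resized (longest-edge)
    # input space and converts them to the requested tensor framework (PyTorch or TensorFlow).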
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="pt",):
'''simple docstring'''
if input_points is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0] ) for point in input_points
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
for point, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__lowerCAmelCase , __lowerCAmelCase = self._pad_points_and_labels(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_labels is not None:
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0],is_bounding_box=__SCREAMING_SNAKE_CASE )
for box in input_boxes
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,is_bounding_box=__SCREAMING_SNAKE_CASE )
for box, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
]
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
__lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
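    # Pads variable-length point prompts (and their labels) to a common length, using point_pad_value
    # as the sentinel for padded entries.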
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = max([point.shape[0] for point in input_points] )
__lowerCAmelCase = []
for i, point in enumerate(__SCREAMING_SNAKE_CASE ):
if point.shape[0] != expected_nb_points:
__lowerCAmelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 )
__lowerCAmelCase = np.append(input_labels[i],[self.point_pad_value] )
processed_input_points.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = processed_input_points
return input_points, input_labels
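    # Rescales (x, y) coordinates (or bounding boxes when is_bounding_box is set) from the original
    # image size to the processor's resized target size.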
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = original_size
__lowerCAmelCase , __lowerCAmelCase = self.image_processor._get_preprocess_shape(__SCREAMING_SNAKE_CASE,longest_edge=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = deepcopy(__SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE )
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1,2,2 )
__lowerCAmelCase = coords[..., 0] * (new_w / old_w)
__lowerCAmelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1,4 )
return coords
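    # Validates the raw point/label/box prompts (accepting framework tensors or nested lists) and
    # converts them to numpy arrays; raises a ValueError on malformed inputs.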
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,):
'''simple docstring'''
if input_points is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks for TF or Torch tensor
__lowerCAmelCase = input_points.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0],__SCREAMING_SNAKE_CASE ):
raise ValueError("""Input points must be a list of list of floating points.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for input_point in input_points]
else:
__lowerCAmelCase = None
if input_labels is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
__lowerCAmelCase = input_labels.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0],__SCREAMING_SNAKE_CASE ):
raise ValueError("""Input labels must be a list of list integers.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for label in input_labels]
else:
__lowerCAmelCase = None
if input_boxes is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
__lowerCAmelCase = input_boxes.numpy().tolist()
if (
not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0],__SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0][0],__SCREAMING_SNAKE_CASE )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) for box in input_boxes]
else:
__lowerCAmelCase = None
return input_points, input_labels, input_boxes
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(__SCREAMING_SNAKE_CASE ) )
def lowerCamelCase__ ( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.image_processor.post_process_masks(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
| 689 | 1 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
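# Hash table variant that resolves collisions by separate chaining: each occupied slot stores a deque of
# values rather than a single entry, with the per-slot capacity governed by charge_factor.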
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.values[key]
def lowerCamelCase__ ( self ):
'''simple docstring'''
return (
sum(self.charge_factor - len(__SCREAMING_SNAKE_CASE ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(__SCREAMING_SNAKE_CASE ) == 0
):
return key
return super()._collision_resolution(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
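# A minimal usage sketch (hypothetical names; it assumes the parent HashTable in hash_table.py exposes an
# insert_data(value) method and a size_table/charge_factor constructor, which is not shown here):
#
#   table = HashTableWithLinkedList(size_table=4, charge_factor=2)
#   for value in (10, 20, 30):
#       table.insert_data(value)
#   # colliding keys append to the same slot's deque instead of probing for a new slot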
| 689 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
_a : int = logging.getLogger(__name__)
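# Fine-tunes a BART-based TAPEX model for table fact verification: by default it loads the TabFact dataset,
# tokenizes (question, table) pairs with TapexTokenizer and trains BartForSequenceClassification.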
@dataclass
class _UpperCAmelCase :
a : Optional[str] =field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
a : Optional[str] =field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
a : int =field(
default=10_24 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
a : bool =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the training data."""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the validation data."""} )
a : Optional[str] =field(default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the test data."""} )
def lowerCamelCase__ ( self ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
__lowerCAmelCase = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__lowerCAmelCase = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class _UpperCAmelCase :
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a : str =field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def _lowerCAmelCase ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(lowercase )
datasets.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
    # For JSON files, this script expects a `statement` column for the input statement and a `table_text` column for the corresponding serialized table (see the preprocessing function below).
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__lowerCAmelCase = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__lowerCAmelCase = data_args.train_file.split(""".""" )[-1]
__lowerCAmelCase = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__lowerCAmelCase = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
__lowerCAmelCase = load_dataset("""csv""" , data_files=lowercase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__lowerCAmelCase = load_dataset("""json""" , data_files=lowercase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__lowerCAmelCase = raw_datasets["""train"""].features["""label"""].names
__lowerCAmelCase = len(lowercase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__lowerCAmelCase = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase , )
__lowerCAmelCase = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__lowerCAmelCase = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__lowerCAmelCase = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__lowerCAmelCase = {"""Refused""": 0, """Entailed""": 1}
__lowerCAmelCase = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
__lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowercase ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowercase ):
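            # Each table arrives as a flat string: rows are separated by "\n" and cells within a row by "#"; the first row holds the column headers.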
__lowerCAmelCase = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
__lowerCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__lowerCAmelCase = examples["""statement"""]
__lowerCAmelCase = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
__lowerCAmelCase = tokenizer(lowercase , lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase )
__lowerCAmelCase = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
__lowerCAmelCase = raw_datasets.map(
lowercase , batched=lowercase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__lowerCAmelCase = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__lowerCAmelCase = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__lowerCAmelCase = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__lowerCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
__lowerCAmelCase = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
__lowerCAmelCase = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowercase ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowercase ):
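        # p.predictions may be a tuple (logits, ...) depending on the model; keep only the logits.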
__lowerCAmelCase = p.predictions[0] if isinstance(p.predictions , lowercase ) else p.predictions
__lowerCAmelCase = np.argmax(lowercase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__lowerCAmelCase = default_data_collator
elif training_args.fpaa:
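        # Pad to a multiple of 8 so fp16 kernels (e.g. tensor cores) get well-aligned tensor shapes.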
__lowerCAmelCase = DataCollatorWithPadding(lowercase , pad_to_multiple_of=8 )
else:
__lowerCAmelCase = None
# Initialize our Trainer
__lowerCAmelCase = Trainer(
model=lowercase , args=lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase , tokenizer=lowercase , data_collator=lowercase , )
# Training
if training_args.do_train:
__lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
__lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCAmelCase = last_checkpoint
__lowerCAmelCase = trainer.train(resume_from_checkpoint=lowercase )
__lowerCAmelCase = train_result.metrics
__lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase )
)
__lowerCAmelCase = min(lowercase , len(lowercase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , lowercase )
trainer.save_metrics("""train""" , lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowerCAmelCase = trainer.evaluate(eval_dataset=lowercase )
__lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase )
__lowerCAmelCase = min(lowercase , len(lowercase ) )
trainer.log_metrics("""eval""" , lowercase )
trainer.save_metrics("""eval""" , lowercase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
__lowerCAmelCase = predict_dataset.remove_columns("""label""" )
__lowerCAmelCase = trainer.predict(lowercase , metric_key_prefix="""predict""" ).predictions
__lowerCAmelCase = np.argmax(lowercase , axis=1 )
__lowerCAmelCase = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(lowercase , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(lowercase ):
__lowerCAmelCase = label_list[item]
writer.write(f'{index}\t{item}\n' )
__lowerCAmelCase = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase )
else:
trainer.create_model_card(**lowercase )
def _lowerCAmelCase ( lowercase ) -> str:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 689 | 1 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _lowerCAmelCase ( lowercase ) -> Optional[int]:
if not is_accelerate_available():
return method
__lowerCAmelCase = version.parse(accelerate.__version__ ).base_version
if version.parse(lowercase ) < version.parse("""0.17.0""" ):
return method
def wrapper(self , *lowercase , **lowercase ):
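        # If the module was dispatched with an accelerate hook, run its pre_forward first (typically this moves offloaded weights onto the execution device).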
if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
self._hf_hook.pre_forward(self )
return method(self , *lowercase , **lowercase )
return wrapper
| 689 |
'''simple docstring'''
import os
import sys
import unittest
_a : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_a : Union[str, Any] = os.path.join(git_repo_path, """src""", """diffusers""")
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = find_backend(""" if not is_torch_available():""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__lowerCAmelCase = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__lowerCAmelCase = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers_and_onnx""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""",__SCREAMING_SNAKE_CASE )
self.assertIn("""torch_and_transformers""",__SCREAMING_SNAKE_CASE )
self.assertIn("""flax_and_transformers""",__SCREAMING_SNAKE_CASE )
self.assertIn("""torch_and_transformers_and_onnx""",__SCREAMING_SNAKE_CASE )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""",objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""",objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""",objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""",objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""",objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""",objects["""torch_and_transformers_and_onnx"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = create_dummy_object("""CONSTANT""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""\nCONSTANT = None\n""" )
__lowerCAmelCase = create_dummy_object("""function""","""'torch'""" )
self.assertEqual(
__SCREAMING_SNAKE_CASE,"""\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
__lowerCAmelCase = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
__lowerCAmelCase = create_dummy_object("""FakeClass""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
__lowerCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""],__SCREAMING_SNAKE_CASE )
| 689 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_a : List[Any] = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Any = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
_a : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase ) -> tuple[int, int]:
try:
__lowerCAmelCase = float(lowercase )
except ValueError:
raise ValueError("""Please enter a valid number""" )
__lowerCAmelCase = decimal - int(lowercase )
if fractional_part == 0:
return int(lowercase ), 1
else:
__lowerCAmelCase = len(str(lowercase ).split(""".""" )[1] )
__lowerCAmelCase = int(decimal * (10**number_of_frac_digits) )
__lowerCAmelCase = 10**number_of_frac_digits
__lowerCAmelCase , __lowerCAmelCase = denominator, numerator
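        # Reduce the fraction: the loop below is the Euclidean algorithm, leaving the gcd in the divisor.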
while True:
__lowerCAmelCase = dividend % divisor
if remainder == 0:
break
__lowerCAmelCase , __lowerCAmelCase = divisor, remainder
__lowerCAmelCase , __lowerCAmelCase = numerator / divisor, denominator / divisor
return int(lowercase ), int(lowercase )
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
| 689 | 1 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE = "arrow",**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
super().__init__(
split=__SCREAMING_SNAKE_CASE,features=__SCREAMING_SNAKE_CASE,cache_dir=__SCREAMING_SNAKE_CASE,keep_in_memory=__SCREAMING_SNAKE_CASE,streaming=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = load_from_cache_file
__lowerCAmelCase = file_format
__lowerCAmelCase = Spark(
df=__SCREAMING_SNAKE_CASE,features=__SCREAMING_SNAKE_CASE,cache_dir=__SCREAMING_SNAKE_CASE,working_dir=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
def lowerCamelCase__ ( self ):
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
__lowerCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__SCREAMING_SNAKE_CASE,file_format=self._file_format,)
return self.builder.as_dataset(split=self.split )
| 689 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_a : Dict = _symbol_database.Default()
_a : Union[str, Any] = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_a : str = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_a : str = None
_a : Union[str, Any] = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_a : Optional[int] = 4_5
_a : List[Any] = 1_5_8_1
_a : str = 1_5_1_7
_a : Optional[Any] = 1_5_7_0
_a : List[str] = 1_5_8_4
_a : List[Any] = 1_7_9_3
_a : Union[str, Any] = 1_7_9_5
_a : Tuple = 1_9_1_6
_a : List[Any] = 1_8_6_4
_a : Any = 1_9_0_5
_a : Optional[Any] = 1_9_1_9
_a : Optional[int] = 2_4_2_9
_a : Tuple = 2_2_0_8
_a : Optional[Any] = 2_4_1_8
_a : List[Any] = 2_3_2_3
_a : str = 2_4_0_7
# @@protoc_insertion_point(module_scope)
| 689 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a : Optional[Any] = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 689 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : torch.FloatTensor
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE=True,):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = torch.nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[0],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
# down
__lowerCAmelCase = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_down_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,add_downsample=not is_final_block,resnet_eps=1e-6,downsample_padding=0,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""",attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# out
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = 2 * out_channels if double_z else out_channels
__lowerCAmelCase = nn.Convad(block_out_channels[-1],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = x
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
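            # Gradient checkpointing: activations are recomputed during the backward pass to save memory.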
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""","""1.11.0""" ):
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
__lowerCAmelCase = down_block(__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE="group",):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[-1],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
__lowerCAmelCase = in_channels if norm_type == """spatial""" else None
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type,attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# up
__lowerCAmelCase = list(reversed(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = reversed_block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_up_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block + 1,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,prev_output_channel=__SCREAMING_SNAKE_CASE,add_upsample=not is_final_block,resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,resnet_time_scale_shift=__SCREAMING_SNAKE_CASE,)
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = output_channel
# out
if norm_type == "spatial":
__lowerCAmelCase = SpatialNorm(block_out_channels[0],__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = nn.Convad(block_out_channels[0],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
__lowerCAmelCase = z
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""","""1.11.0""" ):
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = up_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="random",__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = n_e
__lowerCAmelCase = vq_embed_dim
__lowerCAmelCase = beta
__lowerCAmelCase = legacy
__lowerCAmelCase = nn.Embedding(self.n_e,self.vq_embed_dim )
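        # Initialise the codebook uniformly in [-1/n_e, 1/n_e].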
self.embedding.weight.data.uniform_(-1.0 / self.n_e,1.0 / self.n_e )
__lowerCAmelCase = remap
if self.remap is not None:
self.register_buffer("""used""",torch.tensor(np.load(self.remap ) ) )
__lowerCAmelCase = self.used.shape[0]
__lowerCAmelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowerCAmelCase = self.re_embed
__lowerCAmelCase = self.re_embed + 1
print(
f'Remapping {self.n_e} indices to {self.re_embed} indices. '
f'Using {self.unknown_index} for unknown indices.' )
else:
__lowerCAmelCase = n_e
__lowerCAmelCase = sane_index_shape
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = (inds[:, :, None] == used[None, None, ...]).long()
__lowerCAmelCase = match.argmax(-1 )
__lowerCAmelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
__lowerCAmelCase = torch.randint(0,self.re_embed,size=new[unknown].shape ).to(device=new.device )
else:
__lowerCAmelCase = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
__lowerCAmelCase = 0 # simply set to zero
__lowerCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :],1,__SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = z.permute(0,2,3,1 ).contiguous()
__lowerCAmelCase = z.view(-1,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
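        # torch.cdist returns the pairwise Euclidean distances directly, so the expansion above is not spelled out; argmin picks the nearest codebook entry for each latent vector.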
__lowerCAmelCase = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE,self.embedding.weight ),dim=1 )
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
__lowerCAmelCase = None
__lowerCAmelCase = None
# compute loss for embedding
if not self.legacy:
__lowerCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__lowerCAmelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__lowerCAmelCase = z + (z_q - z).detach()
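        # Straight-through estimator: the forward value equals z_q, but gradients flow back to z.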
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
if self.remap is not None:
__lowerCAmelCase = min_encoding_indices.reshape(z.shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.remap_to_used(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = min_encoding_indices.reshape(-1,1 ) # flatten
if self.sane_index_shape:
__lowerCAmelCase = min_encoding_indices.reshape(z_q.shape[0],z_q.shape[2],z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if self.remap is not None:
__lowerCAmelCase = indices.reshape(shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
__lowerCAmelCase = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
return z_q
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = parameters
__lowerCAmelCase , __lowerCAmelCase = torch.chunk(__SCREAMING_SNAKE_CASE,2,dim=1 )
__lowerCAmelCase = torch.clamp(self.logvar,-30.0,20.0 )
__lowerCAmelCase = deterministic
__lowerCAmelCase = torch.exp(0.5 * self.logvar )
__lowerCAmelCase = torch.exp(self.logvar )
if self.deterministic:
__lowerCAmelCase = __lowerCAmelCase = torch.zeros_like(
self.mean,device=self.parameters.device,dtype=self.parameters.dtype )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
__lowerCAmelCase = randn_tensor(
self.mean.shape,generator=__SCREAMING_SNAKE_CASE,device=self.parameters.device,dtype=self.parameters.dtype )
__lowerCAmelCase = self.mean + self.std * sample
return x
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
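                # KL(N(mean, var) || N(0, 1)) = 0.5 * sum(mean^2 + var - 1 - log var)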
return 0.5 * torch.sum(torch.pow(self.mean,2 ) + self.var - 1.0 - self.logvar,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar,dim=[1, 2, 3],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
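        # Gaussian negative log-likelihood: 0.5 * sum(log(2*pi) + log var + (x - mean)^2 / var) over the given dims.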
__lowerCAmelCase = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean,2 ) / self.var,dim=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.mean
| 689 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a : str = logging.get_logger(__name__)
_a : Optional[int] = {"""vocab_file""": """spm_char.model"""}
_a : Optional[int] = {
"""vocab_file""": {
"""microsoft/speecht5_asr""": """https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model""",
"""microsoft/speecht5_tts""": """https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model""",
"""microsoft/speecht5_vc""": """https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model""",
}
}
_a : Optional[int] = {
"""microsoft/speecht5_asr""": 1_0_2_4,
"""microsoft/speecht5_tts""": 1_0_2_4,
"""microsoft/speecht5_vc""": 1_0_2_4,
}
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[str] =VOCAB_FILES_NAMES
a : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
a : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[Any] =["""input_ids""", """attention_mask"""]
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE="<s>",__SCREAMING_SNAKE_CASE="</s>",__SCREAMING_SNAKE_CASE="<unk>",__SCREAMING_SNAKE_CASE="<pad>",__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE,eos_token=__SCREAMING_SNAKE_CASE,unk_token=__SCREAMING_SNAKE_CASE,pad_token=__SCREAMING_SNAKE_CASE,sp_model_kwargs=self.sp_model_kwargs,**__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = vocab_file
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
__lowerCAmelCase = self.__dict__.copy()
__lowerCAmelCase = None
return state
def __setstate__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = d
# for backward compatibility
if not hasattr(self,"""sp_model_kwargs""" ):
__lowerCAmelCase = {}
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.sp_model.encode(__SCREAMING_SNAKE_CASE,out_type=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.sp_model.piece_to_id(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
return token
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
__lowerCAmelCase = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE,token_ids_a=__SCREAMING_SNAKE_CASE,already_has_special_tokens=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [1]
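        # The single 1 above marks the trailing EOS token appended when inputs are built with special tokens.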
if token_ids_a is None:
return ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE,"""wb""" ) as fi:
__lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 689 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
_a : Optional[int] = logging.get_logger(__name__)
_a : int = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_a : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCAmelCase :
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """Model type selected in the list: """ + """, """.join(lowerCAmelCase_ )} )
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
a : int =field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a : int =field(
default=1_28 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
a : int =field(
default=64 , metadata={
"""help""": (
"""The maximum number of tokens for the question. Questions longer than this will """
"""be truncated to this length."""
)
} , )
a : int =field(
default=30 , metadata={
"""help""": (
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
)
} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
a : float =field(
default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a : int =field(
default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a : int =field(
default=0 , metadata={
"""help""": (
"""language id of input for language-specific xlm models (see"""
""" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
)
} , )
    a : int =field(default=1 , metadata={"""help""": """multiple threads for converting examples to features"""} )
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Optional[Any] ="""train"""
a : Optional[int] ="""dev"""
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : SquadDataTrainingArguments
a : List[SquadFeatures]
a : Split
a : bool
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = Split.train,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = "pt",):
'''simple docstring'''
__lowerCAmelCase = args
__lowerCAmelCase = is_language_sensitive
__lowerCAmelCase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
try:
__lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
__lowerCAmelCase = mode
# Load data features from cache or dataset file
__lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1"""
__lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCAmelCase = cached_features_file + """.lock"""
with FileLock(__SCREAMING_SNAKE_CASE ):
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache:
__lowerCAmelCase = time.time()
__lowerCAmelCase = torch.load(__SCREAMING_SNAKE_CASE )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__lowerCAmelCase = self.old_features["""features"""]
__lowerCAmelCase = self.old_features.get("""dataset""",__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.old_features.get("""examples""",__SCREAMING_SNAKE_CASE )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
""" future run""" )
else:
if mode == Split.dev:
__lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
else:
__lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
__lowerCAmelCase , __lowerCAmelCase = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__SCREAMING_SNAKE_CASE,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples},__SCREAMING_SNAKE_CASE,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.features[i]
__lowerCAmelCase = torch.tensor(feature.input_ids,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.attention_mask,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.token_type_ids,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.cls_index,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.p_mask,dtype=torch.float )
__lowerCAmelCase = torch.tensor(feature.is_impossible,dtype=torch.float )
__lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
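            # These model types don't use token_type_ids here, so drop them from the inputs.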
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
__lowerCAmelCase = torch.tensor(feature.start_position,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
| 689 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=13,__SCREAMING_SNAKE_CASE=7,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=99,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE=5,__SCREAMING_SNAKE_CASE=4,__SCREAMING_SNAKE_CASE=37,__SCREAMING_SNAKE_CASE="gelu",__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=5_12,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=4,__SCREAMING_SNAKE_CASE=None,):
'''simple docstring'''
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size],self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size],self.num_choices )
__lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__SCREAMING_SNAKE_CASE,initializer_range=self.initializer_range,)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = LlamaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = LlamaModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,encoder_hidden_states=__SCREAMING_SNAKE_CASE,encoder_attention_mask=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,encoder_hidden_states=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = LlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = LlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
# first forward pass
__lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,encoder_hidden_states=__SCREAMING_SNAKE_CASE,encoder_attention_mask=__SCREAMING_SNAKE_CASE,use_cache=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
__lowerCAmelCase = ids_tensor((self.batch_size, 3),config.vocab_size )
__lowerCAmelCase = ids_tensor((self.batch_size, 3),vocab_size=2 )
# append to next input_ids and attention mask
__lowerCAmelCase = torch.cat([input_ids, next_tokens],dim=-1 )
__lowerCAmelCase = torch.cat([input_mask, next_mask],dim=-1 )
__lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,encoder_hidden_states=__SCREAMING_SNAKE_CASE,encoder_attention_mask=__SCREAMING_SNAKE_CASE,output_hidden_states=__SCREAMING_SNAKE_CASE,)["""hidden_states"""][0]
__lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,encoder_hidden_states=__SCREAMING_SNAKE_CASE,encoder_attention_mask=__SCREAMING_SNAKE_CASE,past_key_values=__SCREAMING_SNAKE_CASE,output_hidden_states=__SCREAMING_SNAKE_CASE,)["""hidden_states"""][0]
# select random slice
__lowerCAmelCase = ids_tensor((1,),output_from_past.shape[-1] ).item()
__lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,atol=1e-3 ) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
a : int =(LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
a : List[Any] =(LlamaForCausalLM,) if is_torch_available() else ()
a : List[Any] =(
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
a : List[Any] =False
a : Dict =False
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = LlamaModelTester(self )
__lowerCAmelCase = ConfigTester(self,config_class=__SCREAMING_SNAKE_CASE,hidden_size=37 )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase = type
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = 3
__lowerCAmelCase = input_dict["""input_ids"""]
__lowerCAmelCase = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = ids_tensor([self.model_tester.batch_size],self.model_tester.type_sequence_label_size )
__lowerCAmelCase = LlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape,(self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = 3
__lowerCAmelCase = """single_label_classification"""
__lowerCAmelCase = input_dict["""input_ids"""]
__lowerCAmelCase = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = ids_tensor([self.model_tester.batch_size],self.model_tester.type_sequence_label_size )
__lowerCAmelCase = LlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape,(self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = 3
__lowerCAmelCase = """multi_label_classification"""
__lowerCAmelCase = input_dict["""input_ids"""]
__lowerCAmelCase = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels],self.model_tester.type_sequence_label_size ).to(torch.float )
__lowerCAmelCase = LlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = ids_tensor([1, 10],config.vocab_size )
__lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )],config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowerCAmelCase = LlamaModel(__SCREAMING_SNAKE_CASE )
original_model.to(__SCREAMING_SNAKE_CASE )
original_model.eval()
__lowerCAmelCase = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
__lowerCAmelCase = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowerCAmelCase = {"""type""": scaling_type, """factor""": 10.0}
__lowerCAmelCase = LlamaModel(__SCREAMING_SNAKE_CASE )
scaled_model.to(__SCREAMING_SNAKE_CASE )
scaled_model.eval()
__lowerCAmelCase = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
__lowerCAmelCase = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,atol=1e-5 ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__lowerCAmelCase = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""",device_map="""auto""" )
__lowerCAmelCase = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__lowerCAmelCase = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ),__SCREAMING_SNAKE_CASE,atol=1e-2,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__lowerCAmelCase = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30],__SCREAMING_SNAKE_CASE,atol=1e-5,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__lowerCAmelCase = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""",device_map="""auto""" )
__lowerCAmelCase = model(torch.tensor(__SCREAMING_SNAKE_CASE ) )
# Expected mean on dim = -1
__lowerCAmelCase = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ),__SCREAMING_SNAKE_CASE,atol=1e-2,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__lowerCAmelCase = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30],__SCREAMING_SNAKE_CASE,atol=1e-5,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__lowerCAmelCase = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""",device_map="""auto""" )
__lowerCAmelCase = model(torch.tensor(__SCREAMING_SNAKE_CASE ) )
# Expected mean on dim = -1
__lowerCAmelCase = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ),__SCREAMING_SNAKE_CASE,atol=1e-2,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__lowerCAmelCase = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ),__SCREAMING_SNAKE_CASE,atol=1e-2,rtol=1e-2 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__lowerCAmelCase = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""",device_map="""auto""" )
__lowerCAmelCase = model(torch.tensor(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]],dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ),__SCREAMING_SNAKE_CASE,atol=1e-2,rtol=1e-2 )
# fmt: off
__lowerCAmelCase = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30],__SCREAMING_SNAKE_CASE,atol=1e-5,rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
__lowerCAmelCase = """Simply put, the theory of relativity states that """
__lowerCAmelCase = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
__lowerCAmelCase = tokenizer.encode(__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
__lowerCAmelCase = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""",device_map="""sequential""",use_safetensors=__SCREAMING_SNAKE_CASE )
# greedy generation outputs
__lowerCAmelCase = model.generate(__SCREAMING_SNAKE_CASE,max_new_tokens=64,top_p=__SCREAMING_SNAKE_CASE,temperature=1,do_sample=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer.decode(generated_ids[0],skip_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
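# A minimal standalone sketch (not part of the test classes above) of the RoPE scaling
# comparison run by the parameterized test: it assumes, as that test does, that
# LlamaConfig accepts a `rope_scaling` dict of the form {"type": ..., "factor": ...}.
# The helper name and the tiny config values below are illustrative only.
def _rope_scaling_demo(scaling_type="dynamic"):
    import torch
    from transformers import LlamaConfig, LlamaModel

    config = LlamaConfig(
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        max_position_embeddings=512,
    )
    short_input = torch.randint(0, config.vocab_size, (1, 10))

    torch.manual_seed(42)  # same seed so both models start from identical weights
    original_out = LlamaModel(config).eval()(short_input).last_hidden_state

    config.rope_scaling = {"type": scaling_type, "factor": 10.0}
    torch.manual_seed(42)
    scaled_out = LlamaModel(config).eval()(short_input).last_hidden_state

    # Dynamic NTK scaling only changes the rotary embeddings past the original maximum
    # length, so short inputs should match; linear scaling changes them immediately.
    return torch.allclose(original_out, scaled_out, atol=1e-5)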
| 689 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
# vision encoder
if "img_encoder.pos_embed" in name:
__lowerCAmelCase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
__lowerCAmelCase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
__lowerCAmelCase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
__lowerCAmelCase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
__lowerCAmelCase = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
__lowerCAmelCase = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
__lowerCAmelCase = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
__lowerCAmelCase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
__lowerCAmelCase = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
__lowerCAmelCase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
__lowerCAmelCase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
__lowerCAmelCase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
__lowerCAmelCase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
__lowerCAmelCase = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
__lowerCAmelCase = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
__lowerCAmelCase = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
__lowerCAmelCase = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
__lowerCAmelCase = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
__lowerCAmelCase = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
__lowerCAmelCase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
__lowerCAmelCase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
__lowerCAmelCase = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
__lowerCAmelCase = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> Dict:
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(lowercase )
if "qkv" in key:
# weights and biases of the key, value and query projections of the vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase , __lowerCAmelCase = int(key_split[2] ), int(key_split[4] )
__lowerCAmelCase = config.vision_config.hidden_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[dim : dim * 2]
__lowerCAmelCase = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of the text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase = int(key_split[3] )
__lowerCAmelCase = config.text_config.hidden_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[
dim : dim * 2, :
]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[dim : dim * 2]
__lowerCAmelCase = val[-dim:]
else:
__lowerCAmelCase = rename_key(lowercase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
__lowerCAmelCase = val.squeeze_()
else:
__lowerCAmelCase = val
return orig_state_dict
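# A small self-contained sketch of the fused-projection split performed above: the
# original checkpoint stores query/key/value as a single (3 * dim, dim) "qkv" weight,
# which is cut into equal thirds before being assigned to the separate HF q/k/v
# projections. The helper name and the toy `dim` value are illustrative only.
def _split_fused_qkv_demo(dim=4):
    import torch

    fused_weight = torch.randn(3 * dim, dim)
    q_weight = fused_weight[:dim, :]
    k_weight = fused_weight[dim : dim * 2, :]
    v_weight = fused_weight[-dim:, :]
    # Concatenating the three slices recovers the original fused matrix.
    assert torch.equal(torch.cat([q_weight, k_weight, v_weight], dim=0), fused_weight)
    return q_weight, k_weight, v_weight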
def _lowerCAmelCase ( ) -> str:
__lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCAmelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase , lowercase="groupvit-gcc-yfcc" , lowercase=False ) -> List[Any]:
__lowerCAmelCase = GroupViTConfig()
__lowerCAmelCase = GroupViTModel(lowercase ).eval()
__lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )["""model"""]
__lowerCAmelCase = convert_state_dict(lowercase , lowercase )
__lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowercase , strict=lowercase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase ) == 0)
# verify result
__lowerCAmelCase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowercase , padding=lowercase , return_tensors="""pt""" )
with torch.no_grad():
__lowerCAmelCase = model(**lowercase )
if model_name == "groupvit-gcc-yfcc":
__lowerCAmelCase = torch.tensor([[13.35_23, 6.36_29]] )
elif model_name == "groupvit-gcc-redcaps":
__lowerCAmelCase = torch.tensor([[16.18_73, 8.62_30]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , lowercase , atol=1e-3 )
processor.save_pretrained(lowercase )
model.save_pretrained(lowercase )
print("""Successfully saved processor and model to""" , lowercase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowercase , organization="""nielsr""" )
model.push_to_hub(lowercase , organization="""nielsr""" )
if __name__ == "__main__":
_a : int = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_a : List[str] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
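# Example invocation (the script filename below is hypothetical; the flags are exactly
# the ones declared by the argparse block above):
#
#   python convert_groupvit_checkpoint.py \
#       --checkpoint_path /path/to/groupvit_checkpoint.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc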
| 689 | 1 |
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_a : Tuple = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
_a : Any = (
subprocess.check_output(f'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode("""utf-8""").split()
)
_a : Any = """|""".join(sys.argv[1:])
_a : Any = re.compile(rf'^({joined_dirs}).*?\.py$')
_a : Optional[int] = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 689 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_a : Tuple = logging.get_logger(__name__)
_a : Optional[int] = ["""model.decoder.embed_positions.weights"""]
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
if "emb" in name:
__lowerCAmelCase = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
__lowerCAmelCase = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
__lowerCAmelCase = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
__lowerCAmelCase = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
__lowerCAmelCase = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
__lowerCAmelCase = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
__lowerCAmelCase = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
__lowerCAmelCase = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
__lowerCAmelCase = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
__lowerCAmelCase = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple[Dict, Dict]:
__lowerCAmelCase = list(state_dict.keys() )
__lowerCAmelCase = {}
for key in keys:
__lowerCAmelCase = state_dict.pop(lowercase )
__lowerCAmelCase = rename_keys(lowercase )
if "in_proj_weight" in key:
# split fused qkv proj
__lowerCAmelCase = val[:hidden_size, :]
__lowerCAmelCase = val[hidden_size : 2 * hidden_size, :]
__lowerCAmelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__lowerCAmelCase = val
else:
__lowerCAmelCase = val
return state_dict, enc_dec_proj_state_dict
def _lowerCAmelCase ( lowercase ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
__lowerCAmelCase = 1024
__lowerCAmelCase = 24
__lowerCAmelCase = 16
elif checkpoint == "medium":
__lowerCAmelCase = 1536
__lowerCAmelCase = 48
__lowerCAmelCase = 24
elif checkpoint == "large":
__lowerCAmelCase = 2048
__lowerCAmelCase = 48
__lowerCAmelCase = 32
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
__lowerCAmelCase = MusicgenDecoderConfig(
hidden_size=lowercase , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase , num_attention_heads=lowercase , )
return config
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase=None , lowercase=None , lowercase="cpu" ) -> Optional[Any]:
__lowerCAmelCase = MusicGen.get_pretrained(lowercase , device=lowercase )
__lowerCAmelCase = decoder_config_from_checkpoint(lowercase )
__lowerCAmelCase = fairseq_model.lm.state_dict()
__lowerCAmelCase , __lowerCAmelCase = rename_state_dict(
lowercase , hidden_size=decoder_config.hidden_size )
__lowerCAmelCase = TaEncoderModel.from_pretrained("""t5-base""" )
__lowerCAmelCase = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
__lowerCAmelCase = MusicgenForCausalLM(lowercase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__lowerCAmelCase , __lowerCAmelCase = decoder.load_state_dict(lowercase , strict=lowercase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase )
if len(lowercase ) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
if len(lowercase ) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
__lowerCAmelCase = MusicgenForConditionalGeneration(text_encoder=lowercase , audio_encoder=lowercase , decoder=lowercase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase )
# check we can do a forward pass
__lowerCAmelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__lowerCAmelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__lowerCAmelCase = model(input_ids=lowercase , decoder_input_ids=lowercase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
__lowerCAmelCase = AutoTokenizer.from_pretrained("""t5-base""" )
__lowerCAmelCase = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
__lowerCAmelCase = MusicgenProcessor(feature_extractor=lowercase , tokenizer=lowercase )
# set the appropriate bos/pad token ids
__lowerCAmelCase = 2048
__lowerCAmelCase = 2048
# set other default generation config params
__lowerCAmelCase = int(30 * audio_encoder.config.frame_rate )
__lowerCAmelCase = True
__lowerCAmelCase = 3.0
if pytorch_dump_folder is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(lowercase )
processor.save_pretrained(lowercase )
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(lowercase )
processor.push_to_hub(lowercase )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
_a : List[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
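# Example invocation (script filename hypothetical; flags as declared above):
#
#   python convert_musicgen_checkpoint.py --checkpoint small --pytorch_dump_folder ./musicgen-small
#
# Below is a minimal sketch of how a converted checkpoint could be exercised afterwards.
# The hub id `facebook/musicgen-small` and the text prompt are illustrative assumptions.
def _musicgen_generate_demo():
    from transformers import AutoProcessor, MusicgenForConditionalGeneration

    processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
    model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")

    inputs = processor(text=["lo-fi hip hop beat with warm piano"], padding=True, return_tensors="pt")
    # ~256 new tokens is roughly five seconds of audio at EnCodec's 50 Hz frame rate.
    audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3.0, max_new_tokens=256)
    return audio_values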
| 689 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : torch.FloatTensor
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE=True,):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = torch.nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[0],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
# down
__lowerCAmelCase = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_down_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,add_downsample=not is_final_block,resnet_eps=1e-6,downsample_padding=0,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""",attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# out
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = 2 * out_channels if double_z else out_channels
__lowerCAmelCase = nn.Convad(block_out_channels[-1],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = x
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""","""1.11.0""" ):
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
__lowerCAmelCase = down_block(__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE="group",):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[-1],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
__lowerCAmelCase = in_channels if norm_type == """spatial""" else None
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type,attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# up
__lowerCAmelCase = list(reversed(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = reversed_block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_up_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block + 1,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,prev_output_channel=__SCREAMING_SNAKE_CASE,add_upsample=not is_final_block,resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,resnet_time_scale_shift=__SCREAMING_SNAKE_CASE,)
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = output_channel
# out
if norm_type == "spatial":
__lowerCAmelCase = SpatialNorm(block_out_channels[0],__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = nn.Convad(block_out_channels[0],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
__lowerCAmelCase = z
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""","""1.11.0""" ):
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = up_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="random",__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = n_e
__lowerCAmelCase = vq_embed_dim
__lowerCAmelCase = beta
__lowerCAmelCase = legacy
__lowerCAmelCase = nn.Embedding(self.n_e,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e,1.0 / self.n_e )
__lowerCAmelCase = remap
if self.remap is not None:
self.register_buffer("""used""",torch.tensor(np.load(self.remap ) ) )
__lowerCAmelCase = self.used.shape[0]
__lowerCAmelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowerCAmelCase = self.re_embed
__lowerCAmelCase = self.re_embed + 1
print(
f'Remapping {self.n_e} indices to {self.re_embed} indices. '
f'Using {self.unknown_index} for unknown indices.' )
else:
__lowerCAmelCase = n_e
__lowerCAmelCase = sane_index_shape
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = (inds[:, :, None] == used[None, None, ...]).long()
__lowerCAmelCase = match.argmax(-1 )
__lowerCAmelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
__lowerCAmelCase = torch.randint(0,self.re_embed,size=new[unknown].shape ).to(device=new.device )
else:
__lowerCAmelCase = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
__lowerCAmelCase = 0 # simply set to zero
__lowerCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :],1,__SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = z.permute(0,2,3,1 ).contiguous()
__lowerCAmelCase = z.view(-1,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__lowerCAmelCase = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE,self.embedding.weight ),dim=1 )
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
__lowerCAmelCase = None
__lowerCAmelCase = None
# compute loss for embedding
if not self.legacy:
__lowerCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__lowerCAmelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__lowerCAmelCase = z + (z_q - z).detach()
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
if self.remap is not None:
__lowerCAmelCase = min_encoding_indices.reshape(z.shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.remap_to_used(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = min_encoding_indices.reshape(-1,1 ) # flatten
if self.sane_index_shape:
__lowerCAmelCase = min_encoding_indices.reshape(z_q.shape[0],z_q.shape[2],z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if self.remap is not None:
__lowerCAmelCase = indices.reshape(shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
__lowerCAmelCase = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
return z_q
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = parameters
__lowerCAmelCase , __lowerCAmelCase = torch.chunk(__SCREAMING_SNAKE_CASE,2,dim=1 )
__lowerCAmelCase = torch.clamp(self.logvar,-30.0,20.0 )
__lowerCAmelCase = deterministic
__lowerCAmelCase = torch.exp(0.5 * self.logvar )
__lowerCAmelCase = torch.exp(self.logvar )
if self.deterministic:
__lowerCAmelCase = __lowerCAmelCase = torch.zeros_like(
self.mean,device=self.parameters.device,dtype=self.parameters.dtype )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
__lowerCAmelCase = randn_tensor(
self.mean.shape,generator=__SCREAMING_SNAKE_CASE,device=self.parameters.device,dtype=self.parameters.dtype )
__lowerCAmelCase = self.mean + self.std * sample
return x
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean,2 ) + self.var - 1.0 - self.logvar,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar,dim=[1, 2, 3],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
__lowerCAmelCase = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean,2 ) / self.var,dim=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.mean
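# Minimal usage sketch for the diagonal Gaussian posterior defined above. Because the
# class names in this file are obfuscated, the sketch imports the equivalent public
# class from diffusers (assumed to live at `diffusers.models.vae`); tensor shapes are
# illustrative only.
def _diagonal_gaussian_demo():
    import torch
    from diffusers.models.vae import DiagonalGaussianDistribution

    parameters = torch.randn(1, 8, 4, 4)  # (batch, 2 * latent_channels, height, width)
    posterior = DiagonalGaussianDistribution(parameters)
    latents = posterior.sample()   # mean + std * eps (reparameterization trick)
    kl = posterior.kl()            # KL divergence against a standard normal
    return latents.shape, kl.shape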
| 689 |
'''simple docstring'''
from collections import deque
def _lowerCAmelCase ( lowercase ) -> Dict:
__lowerCAmelCase = len(lowercase )
__lowerCAmelCase = deque()
__lowerCAmelCase = [False for _ in range(lowercase )]
__lowerCAmelCase = [-1 for _ in range(lowercase )]
__lowerCAmelCase = index_of[:]
def strong_connect(lowercase , lowercase , lowercase ):
__lowerCAmelCase = index # the number when this node is seen
__lowerCAmelCase = index # lowest rank node reachable from here
index += 1
stack.append(lowercase )
__lowerCAmelCase = True
for w in g[v]:
if index_of[w] == -1:
__lowerCAmelCase = strong_connect(lowercase , lowercase , lowercase )
__lowerCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__lowerCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
__lowerCAmelCase = []
__lowerCAmelCase = stack.pop()
__lowerCAmelCase = False
component.append(lowercase )
while w != v:
__lowerCAmelCase = stack.pop()
__lowerCAmelCase = False
component.append(lowercase )
components.append(lowercase )
return index
__lowerCAmelCase = []
for v in range(lowercase ):
if index_of[v] == -1:
strong_connect(lowercase , 0 , lowercase )
return components
def _lowerCAmelCase ( lowercase , lowercase ) -> str:
__lowerCAmelCase = [[] for _ in range(lowercase )]
for u, v in edges:
g[u].append(lowercase )
return g
if __name__ == "__main__":
# Test
_a : Any = 7
_a : Tuple = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_a : Optional[int] = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_a : Optional[Any] = [(u, v) for u, v in zip(source, target)]
_a : Optional[int] = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 689 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self ):
'''simple docstring'''
self.test()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = 0
__lowerCAmelCase = False
while not completed:
if counter == 1:
self.reset()
__lowerCAmelCase = self.advance()
if not self.does_advance(__SCREAMING_SNAKE_CASE ):
raise Exception(
"""Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.update(__SCREAMING_SNAKE_CASE )
counter += 1
if counter > 1_00_00:
raise Exception("""update() does not fulfill the constraint.""" )
if self.remaining() != 0:
raise Exception("""Custom Constraint is not defined correctly.""" )
@abstractmethod
def lowerCamelCase__ ( self ):
'''simple docstring'''
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__ ( self ):
'''simple docstring'''
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__ ( self ):
'''simple docstring'''
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super(__SCREAMING_SNAKE_CASE,self ).__init__()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or len(__SCREAMING_SNAKE_CASE ) == 0:
raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.' )
if any((not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' )
__lowerCAmelCase = token_ids
__lowerCAmelCase = len(self.token_ids )
__lowerCAmelCase = -1 # the index of the currently fulfilled step
__lowerCAmelCase = False
def lowerCamelCase__ ( self ):
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(__SCREAMING_SNAKE_CASE )}' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(__SCREAMING_SNAKE_CASE )}' )
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
if self.does_advance(__SCREAMING_SNAKE_CASE ):
self.fulfilled_idx += 1
__lowerCAmelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
__lowerCAmelCase = True
__lowerCAmelCase = completed
else:
# failed to make progress.
__lowerCAmelCase = True
self.reset()
return stepped, completed, reset
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = False
__lowerCAmelCase = 0
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = PhrasalConstraint(self.token_ids )
if stateful:
__lowerCAmelCase = self.seqlen
__lowerCAmelCase = self.fulfilled_idx
__lowerCAmelCase = self.completed
return new_constraint
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
__lowerCAmelCase = max([len(__SCREAMING_SNAKE_CASE ) for one in nested_token_ids] )
__lowerCAmelCase = {}
for token_ids in nested_token_ids:
__lowerCAmelCase = root
for tidx, token_id in enumerate(__SCREAMING_SNAKE_CASE ):
if token_id not in level:
__lowerCAmelCase = {}
__lowerCAmelCase = level[token_id]
if no_subsets and self.has_subsets(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Each list in `nested_token_ids` can't be a complete subset of another list, but is"""
f' {nested_token_ids}.' )
__lowerCAmelCase = root
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.trie
for current_token in current_seq:
__lowerCAmelCase = start[current_token]
__lowerCAmelCase = list(start.keys() )
return next_tokens
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.next_tokens(__SCREAMING_SNAKE_CASE )
return len(__SCREAMING_SNAKE_CASE ) == 0
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = list(root.values() )
if len(__SCREAMING_SNAKE_CASE ) == 0:
return 1
else:
return sum([self.count_leaves(__SCREAMING_SNAKE_CASE ) for nn in next_nodes] )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.count_leaves(__SCREAMING_SNAKE_CASE )
return len(__SCREAMING_SNAKE_CASE ) != leaf_count
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super(__SCREAMING_SNAKE_CASE,self ).__init__()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or len(__SCREAMING_SNAKE_CASE ) == 0:
raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' )
if any(not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) for token_ids in nested_token_ids ):
raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' )
if any(
any((not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' )
__lowerCAmelCase = DisjunctiveTrie(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = nested_token_ids
__lowerCAmelCase = self.trie.max_height
__lowerCAmelCase = []
__lowerCAmelCase = False
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.trie.next_tokens(self.current_seq )
if len(__SCREAMING_SNAKE_CASE ) == 0:
return None
else:
return token_list
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(__SCREAMING_SNAKE_CASE )}' )
__lowerCAmelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(__SCREAMING_SNAKE_CASE )}' )
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
if self.does_advance(__SCREAMING_SNAKE_CASE ):
self.current_seq.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = True
else:
__lowerCAmelCase = True
self.reset()
__lowerCAmelCase = self.trie.reached_leaf(self.current_seq )
__lowerCAmelCase = completed
return stepped, completed, reset
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = False
__lowerCAmelCase = []
def lowerCamelCase__ ( self ):
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
__lowerCAmelCase = self.seqlen
__lowerCAmelCase = self.current_seq
__lowerCAmelCase = self.completed
return new_constraint
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = constraints
# max # of steps required to fulfill a given constraint
__lowerCAmelCase = max([c.seqlen for c in constraints] )
__lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = False
self.init_state()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = None
__lowerCAmelCase = [constraint.copy(stateful=__SCREAMING_SNAKE_CASE ) for constraint in self.constraints]
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__lowerCAmelCase = constraint.advance()
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
token_list.append(__SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
token_list.extend(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = self.inprogress_constraint.advance()
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
token_list.append(__SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
token_list.extend(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) == 0:
return None
else:
return token_list
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__lowerCAmelCase , __lowerCAmelCase = self.add(__SCREAMING_SNAKE_CASE )
# the entire list of constraints are fulfilled
if self.completed:
break
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.' )
__lowerCAmelCase , __lowerCAmelCase = False, False
if self.completed:
__lowerCAmelCase = True
__lowerCAmelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
# current job, simply update the state
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.inprogress_constraint.update(__SCREAMING_SNAKE_CASE )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
__lowerCAmelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
__lowerCAmelCase = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = pending_constraint.update(__SCREAMING_SNAKE_CASE )
if not stepped:
raise Exception(
"""`constraint.update(token_id)` is not yielding incremental progress, """
"""even though `constraint.does_advance(token_id)` is true.""" )
if complete:
self.complete_constraints.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = None
if not complete and stepped:
__lowerCAmelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
__lowerCAmelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
__lowerCAmelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
        __lowerCAmelCase = ConstraintListState(self.constraints ) # we never actually touch the self.constraints objects
        # throughout this process, so they are still in their initialization state.
if stateful:
__lowerCAmelCase = [
constraint.copy(stateful=__SCREAMING_SNAKE_CASE ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
__lowerCAmelCase = self.inprogress_constraint.copy(stateful=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state
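# Illustrative usage sketch (added note, not part of the original entry): the class above mirrors
# `transformers.generation.beam_constraints.ConstraintListState`; the upstream method names
# (`advance`, `add`, `completed`) are assumed here purely to show the intended flow.
if __name__ == "__main__":
    from transformers.generation.beam_constraints import ConstraintListState, PhrasalConstraint

    state = ConstraintListState([PhrasalConstraint([5, 6, 7])])
    print(state.advance())  # tokens that would make progress next, e.g. [5]
    for token_id in (5, 6, 7):
        state.add(token_id)  # steps (and finally completes) the phrasal constraint
    print(state.completed)  # True once every constraint in the list is fulfilled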
| 689 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ) -> Union[str, Any]:
__lowerCAmelCase = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
__lowerCAmelCase = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(lowercase )
# Let's go
__lowerCAmelCase = parser.parse_args()
if not hasattr(lowercase , """func""" ):
parser.print_help()
exit(1 )
# Run
__lowerCAmelCase = args.func(lowercase )
service.run()
if __name__ == "__main__":
main()
| 689 | 1 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
_a : Dict = """src/diffusers"""
# Matches is_xxx_available()
_a : List[Any] = re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
_a : List[Any] = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
_a : List[str] = """
{0} = None
"""
_a : str = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
_a : Tuple = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def _lowerCAmelCase ( lowercase ) -> Union[str, Any]:
__lowerCAmelCase = _re_backend.findall(lowercase )
if len(lowercase ) == 0:
return None
return "_and_".join(lowercase )
def _lowerCAmelCase ( ) -> List[Any]:
with open(os.path.join(lowercase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__lowerCAmelCase = f.readlines()
# Get to the point we do the actual imports for type checking
__lowerCAmelCase = 0
__lowerCAmelCase = {}
    # Go through to the end of the file
while line_index < len(lowercase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
__lowerCAmelCase = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("""else:""" ):
line_index += 1
line_index += 1
__lowerCAmelCase = []
# Until we unindent, add backend objects to the list
while line_index < len(lowercase ) and len(lines[line_index] ) > 1:
__lowerCAmelCase = lines[line_index]
__lowerCAmelCase = _re_single_line_import.search(lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(lowercase ) > 0:
__lowerCAmelCase = objects
else:
line_index += 1
return backend_specific_objects
def _lowerCAmelCase ( lowercase , lowercase ) -> str:
if name.isupper():
return DUMMY_CONSTANT.format(lowercase )
elif name.islower():
return DUMMY_FUNCTION.format(lowercase , lowercase )
else:
return DUMMY_CLASS.format(lowercase , lowercase )
def _lowerCAmelCase ( lowercase=None ) -> str:
if backend_specific_objects is None:
__lowerCAmelCase = read_init()
    # For special correspondence from backend to module name, as used in the function requires_modulename
__lowerCAmelCase = {}
for backend, objects in backend_specific_objects.items():
__lowerCAmelCase = """[""" + """, """.join(f'"{b}"' for b in backend.split("""_and_""" ) ) + """]"""
__lowerCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(lowercase , lowercase ) for o in objects] )
__lowerCAmelCase = dummy_file
return dummy_files
def _lowerCAmelCase ( lowercase=False ) -> Optional[int]:
__lowerCAmelCase = create_dummy_files()
    # For special correspondence from backend to the shortcut used in utils/dummy_xxx_objects.py
__lowerCAmelCase = {"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
__lowerCAmelCase = os.path.join(lowercase , """utils""" )
__lowerCAmelCase = {
backend: os.path.join(lowercase , f'dummy_{short_names.get(lowercase , lowercase )}_objects.py' )
for backend in dummy_files.keys()
}
__lowerCAmelCase = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(lowercase ):
with open(lowercase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__lowerCAmelCase = f.read()
else:
__lowerCAmelCase = """"""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f'Updating diffusers.utils.dummy_{short_names.get(lowercase , lowercase )}_objects.py as the main '
"""__init__ has new objects.""" )
with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
f'diffusers.utils.dummy_{short_names.get(lowercase , lowercase )}_objects.py. Run `make fix-copies` '
"""to fix this.""" )
if __name__ == "__main__":
_a : str = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_a : Optional[int] = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 689 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)
_a : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
_a : Any = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
for attribute in key.split(""".""" ):
__lowerCAmelCase = getattr(lowercase , lowercase )
if weight_type is not None:
__lowerCAmelCase = getattr(lowercase , lowercase ).shape
else:
__lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
__lowerCAmelCase = value
elif weight_type == "weight_g":
__lowerCAmelCase = value
elif weight_type == "weight_v":
__lowerCAmelCase = value
elif weight_type == "bias":
__lowerCAmelCase = value
else:
__lowerCAmelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]:
__lowerCAmelCase = []
__lowerCAmelCase = fairseq_model.state_dict()
__lowerCAmelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCAmelCase = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key):
# special case since naming is very similar
continue
__lowerCAmelCase = True
if "*" in mapped_key:
__lowerCAmelCase = name.split(lowercase )[0].split(""".""" )[-2]
__lowerCAmelCase = mapped_key.replace("""*""" , lowercase )
if "weight_g" in name:
__lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
__lowerCAmelCase = """weight_v"""
elif "bias" in name:
__lowerCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase = """weight"""
else:
__lowerCAmelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'Unused weights: {unused_weights}' )
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
__lowerCAmelCase = name.split(""".""" )
__lowerCAmelCase = int(items[0] )
__lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Dict:
if config_path is not None:
__lowerCAmelCase = UniSpeechSatConfig.from_pretrained(lowercase )
else:
__lowerCAmelCase = UniSpeechSatConfig()
__lowerCAmelCase = """"""
if is_finetuned:
__lowerCAmelCase = UniSpeechSatForCTC(lowercase )
else:
__lowerCAmelCase = UniSpeechSatForPreTraining(lowercase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
__lowerCAmelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_a : List[str] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_a : Union[str, Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
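# Example invocation (added note; the script name and every path below are placeholders):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --not_finetuned
# Omit --not_finetuned when converting a CTC fine-tuned checkpoint.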
| 689 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase = 100 ) -> int:
    collect_powers = set()
    __lowerCAmelCase = lowercase + 1 # maximum limit (a and b run from 2 to `lowercase` inclusive)
    for a in range(2 , __lowerCAmelCase ):
        for b in range(2 , __lowerCAmelCase ):
            collect_powers.add(a**b ) # adds the current power a**b to the set of distinct terms
    return len(collect_powers )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 689 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_a : str = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_a : Dict = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_a : List[str] = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ),reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = spearmanr(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 689 | 1 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
_a : Tuple = TypeVar("""T""")
_a : Tuple = Union[List[T], Tuple[T, ...]]
_a : List[Any] = Union[T, List[T], Dict[str, T]]
_a : Tuple = Union[str, bytes, os.PathLike]
| 689 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ):
a : List[str] =["""onnx"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""onnx"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""onnx"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""onnx"""] )
| 689 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase ) -> list:
    arr_size = len(lowercase )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if lowercase[i + 1] < lowercase[i]:
                lowercase[i], lowercase[i + 1] = lowercase[i + 1], lowercase[i]
    return lowercase
if __name__ == "__main__":
_a : int = list(range(1_0, 0, -1))
    print(f'Original: {_a}. Sorted: {_lowerCAmelCase(_a)}')
| 689 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : Optional[int] = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[str] ="""gptj"""
a : Optional[int] ={
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self,__SCREAMING_SNAKE_CASE=5_04_00,__SCREAMING_SNAKE_CASE=20_48,__SCREAMING_SNAKE_CASE=40_96,__SCREAMING_SNAKE_CASE=28,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="gelu_new",__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=False,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = vocab_size
__lowerCAmelCase = n_positions
__lowerCAmelCase = n_embd
__lowerCAmelCase = n_layer
__lowerCAmelCase = n_head
__lowerCAmelCase = n_inner
__lowerCAmelCase = rotary_dim
__lowerCAmelCase = activation_function
__lowerCAmelCase = resid_pdrop
__lowerCAmelCase = embd_pdrop
__lowerCAmelCase = attn_pdrop
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_range
__lowerCAmelCase = use_cache
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
super().__init__(
bos_token_id=__SCREAMING_SNAKE_CASE,eos_token_id=__SCREAMING_SNAKE_CASE,tie_word_embeddings=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = "default",__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE,task=__SCREAMING_SNAKE_CASE,patching_specs=__SCREAMING_SNAKE_CASE,use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config,"""pad_token_id""",__SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
__lowerCAmelCase = 0
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE,direction="""inputs""" )
__lowerCAmelCase = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._config.n_layer
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._config.n_head
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,):
'''simple docstring'''
__lowerCAmelCase = super(__SCREAMING_SNAKE_CASE,self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE,batch_size=__SCREAMING_SNAKE_CASE,seq_length=__SCREAMING_SNAKE_CASE,is_pair=__SCREAMING_SNAKE_CASE,framework=__SCREAMING_SNAKE_CASE )
        # We need to order the inputs in the way they appear in the forward()
__lowerCAmelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCAmelCase = seqlen + 2
__lowerCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowerCAmelCase = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
__lowerCAmelCase = common_inputs["""attention_mask"""]
if self.use_past:
__lowerCAmelCase = ordered_inputs["""attention_mask"""].dtype
__lowerCAmelCase = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,dtype=__SCREAMING_SNAKE_CASE )],dim=1 )
return ordered_inputs
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 13
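# Illustrative sketch (added note): assuming this entry mirrors transformers' `GPTJConfig`, the
# attribute_map above lets the generic names resolve to the GPT-J specific ones.
if __name__ == "__main__":
    from transformers import GPTJConfig

    cfg = GPTJConfig(vocab_size=1000, n_positions=128, n_embd=64, n_layer=2, n_head=4, rotary_dim=16)
    print(cfg.hidden_size, cfg.num_hidden_layers)  # resolved to n_embd and n_layer via the attribute_map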
| 689 | 1 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_a : int = logging.get_logger(__name__)
# General docstring
_a : Any = """RegNetConfig"""
# Base docstring
_a : List[str] = """facebook/regnet-y-040"""
_a : Optional[int] = [1, 1_0_8_8, 7, 7]
# Image classification docstring
_a : Dict = """facebook/regnet-y-040"""
_a : str = """tabby, tabby cat"""
_a : List[str] = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 3,__SCREAMING_SNAKE_CASE = 1,__SCREAMING_SNAKE_CASE = 1,__SCREAMING_SNAKE_CASE = "relu",):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = nn.Convad(
__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,kernel_size=__SCREAMING_SNAKE_CASE,stride=__SCREAMING_SNAKE_CASE,padding=kernel_size // 2,groups=__SCREAMING_SNAKE_CASE,bias=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = nn.BatchNormad(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.convolution(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.normalization(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.activation(__SCREAMING_SNAKE_CASE )
return hidden_state
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = RegNetConvLayer(
config.num_channels,config.embedding_size,kernel_size=3,stride=2,activation=config.hidden_act )
__lowerCAmelCase = config.num_channels
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
__lowerCAmelCase = self.embedder(__SCREAMING_SNAKE_CASE )
return hidden_state
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 2 ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = nn.Convad(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,kernel_size=1,stride=__SCREAMING_SNAKE_CASE,bias=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = nn.BatchNormad(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.convolution(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.normalization(__SCREAMING_SNAKE_CASE )
return hidden_state
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) )
__lowerCAmelCase = nn.Sequential(
nn.Convad(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,kernel_size=1 ),nn.ReLU(),nn.Convad(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,kernel_size=1 ),nn.Sigmoid(),)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.pooler(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.attention(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = hidden_state * attention
return hidden_state
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 1 ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = in_channels != out_channels or stride != 1
__lowerCAmelCase = max(1,out_channels // config.groups_width )
__lowerCAmelCase = (
RegNetShortCut(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,stride=__SCREAMING_SNAKE_CASE ) if should_apply_shortcut else nn.Identity()
)
__lowerCAmelCase = nn.Sequential(
RegNetConvLayer(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,kernel_size=1,activation=config.hidden_act ),RegNetConvLayer(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,stride=__SCREAMING_SNAKE_CASE,groups=__SCREAMING_SNAKE_CASE,activation=config.hidden_act ),RegNetConvLayer(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,kernel_size=1,activation=__SCREAMING_SNAKE_CASE ),)
__lowerCAmelCase = ACTaFN[config.hidden_act]
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = hidden_state
__lowerCAmelCase = self.layer(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.shortcut(__SCREAMING_SNAKE_CASE )
hidden_state += residual
__lowerCAmelCase = self.activation(__SCREAMING_SNAKE_CASE )
return hidden_state
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 1 ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = in_channels != out_channels or stride != 1
__lowerCAmelCase = max(1,out_channels // config.groups_width )
__lowerCAmelCase = (
RegNetShortCut(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,stride=__SCREAMING_SNAKE_CASE ) if should_apply_shortcut else nn.Identity()
)
__lowerCAmelCase = nn.Sequential(
RegNetConvLayer(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,kernel_size=1,activation=config.hidden_act ),RegNetConvLayer(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,stride=__SCREAMING_SNAKE_CASE,groups=__SCREAMING_SNAKE_CASE,activation=config.hidden_act ),RegNetSELayer(__SCREAMING_SNAKE_CASE,reduced_channels=int(round(in_channels / 4 ) ) ),RegNetConvLayer(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,kernel_size=1,activation=__SCREAMING_SNAKE_CASE ),)
__lowerCAmelCase = ACTaFN[config.hidden_act]
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = hidden_state
__lowerCAmelCase = self.layer(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.shortcut(__SCREAMING_SNAKE_CASE )
hidden_state += residual
__lowerCAmelCase = self.activation(__SCREAMING_SNAKE_CASE )
return hidden_state
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 2,__SCREAMING_SNAKE_CASE = 2,):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
__lowerCAmelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,stride=__SCREAMING_SNAKE_CASE,),*[layer(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) for _ in range(depth - 1 )],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.layers(__SCREAMING_SNAKE_CASE )
return hidden_state
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
__SCREAMING_SNAKE_CASE,config.embedding_size,config.hidden_sizes[0],stride=2 if config.downsample_in_first_stage else 1,depth=config.depths[0],) )
__lowerCAmelCase = zip(config.hidden_sizes,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__SCREAMING_SNAKE_CASE,config.depths[1:] ):
self.stages.append(RegNetStage(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,depth=__SCREAMING_SNAKE_CASE ) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = True ):
'''simple docstring'''
__lowerCAmelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__lowerCAmelCase = hidden_states + (hidden_state,)
__lowerCAmelCase = stage_module(__SCREAMING_SNAKE_CASE )
if output_hidden_states:
__lowerCAmelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__SCREAMING_SNAKE_CASE,hidden_states=__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Tuple =RegNetConfig
a : Union[str, Any] ="""regnet"""
a : Optional[int] ="""pixel_values"""
a : Union[str, Any] =True
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE,nn.Convad ):
nn.init.kaiming_normal_(module.weight,mode="""fan_out""",nonlinearity="""relu""" )
elif isinstance(__SCREAMING_SNAKE_CASE,(nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight,1 )
nn.init.constant_(module.bias,0 )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = value
_a : str = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_a : Tuple = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , lowerCAmelCase_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = config
__lowerCAmelCase = RegNetEmbeddings(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = RegNetEncoder(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,output_type=__SCREAMING_SNAKE_CASE,config_class=_CONFIG_FOR_DOC,modality="""vision""",expected_output=_EXPECTED_OUTPUT_SHAPE,)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
__lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase = self.embedder(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.encoder(
__SCREAMING_SNAKE_CASE,output_hidden_states=__SCREAMING_SNAKE_CASE,return_dict=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = encoder_outputs[0]
__lowerCAmelCase = self.pooler(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__SCREAMING_SNAKE_CASE,pooler_output=__SCREAMING_SNAKE_CASE,hidden_states=encoder_outputs.hidden_states,)
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , lowerCAmelCase_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = config.num_labels
__lowerCAmelCase = RegNetModel(__SCREAMING_SNAKE_CASE )
# classification head
__lowerCAmelCase = nn.Sequential(
nn.Flatten(),nn.Linear(config.hidden_sizes[-1],config.num_labels ) if config.num_labels > 0 else nn.Identity(),)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT,output_type=__SCREAMING_SNAKE_CASE,config_class=_CONFIG_FOR_DOC,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,):
'''simple docstring'''
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase = self.regnet(__SCREAMING_SNAKE_CASE,output_hidden_states=__SCREAMING_SNAKE_CASE,return_dict=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = outputs.pooler_output if return_dict else outputs[1]
__lowerCAmelCase = self.classifier(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowerCAmelCase = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowerCAmelCase = """single_label_classification"""
else:
__lowerCAmelCase = """multi_label_classification"""
if self.config.problem_type == "regression":
__lowerCAmelCase = MSELoss()
if self.num_labels == 1:
__lowerCAmelCase = loss_fct(logits.squeeze(),labels.squeeze() )
else:
__lowerCAmelCase = loss_fct(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
elif self.config.problem_type == "single_label_classification":
__lowerCAmelCase = CrossEntropyLoss()
__lowerCAmelCase = loss_fct(logits.view(-1,self.num_labels ),labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__lowerCAmelCase = BCEWithLogitsLoss()
__lowerCAmelCase = loss_fct(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
if not return_dict:
__lowerCAmelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__SCREAMING_SNAKE_CASE,logits=__SCREAMING_SNAKE_CASE,hidden_states=outputs.hidden_states )
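# Illustrative sketch (added note): assuming these classes mirror transformers' RegNet models, a dummy
# batch can be pushed through the released checkpoint referenced in the docstrings above.
if __name__ == "__main__":
    from transformers import RegNetForImageClassification

    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    dummy_batch = torch.randn(1, 3, 224, 224)  # stand-in for a preprocessed image batch
    with torch.no_grad():
        logits = model(pixel_values=dummy_batch).logits
    print(logits.shape)  # (1, num_labels); 1000 labels for the ImageNet-1k head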
| 689 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase = 5000_0000 ) -> int:
    ret = set()
    prime_square_limit = int((lowercase - 24) ** (1 / 2) )
    primes = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
    for primea in primes:
        square = primea * primea
        for primeb in primes:
            cube = primeb * primeb * primeb
            if square + cube >= lowercase - 16:
                break
            for primec in primes:
                tetr = primec * primec * primec * primec
                total = square + cube + tetr
                if total >= lowercase:
                    break
                ret.add(total )
    return len(ret )
if __name__ == "__main__":
    print(f'{_lowerCAmelCase() = }')
| 689 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowerCAmelCase ( lowercase ) -> Any:
warnings.warn(
"""The preprocess method is deprecated and will be removed in a future version. Please"""
""" use VaeImageProcessor.preprocess instead""" , lowercase , )
if isinstance(lowercase , torch.Tensor ):
return image
elif isinstance(lowercase , PIL.Image.Image ):
__lowerCAmelCase = [image]
if isinstance(image[0] , PIL.Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = image[0].size
__lowerCAmelCase , __lowerCAmelCase = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__lowerCAmelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
__lowerCAmelCase = np.concatenate(lowercase , axis=0 )
__lowerCAmelCase = np.array(lowercase ).astype(np.floataa ) / 2_55.0
__lowerCAmelCase = image.transpose(0 , 3 , 1 , 2 )
__lowerCAmelCase = 2.0 * image - 1.0
__lowerCAmelCase = torch.from_numpy(lowercase )
elif isinstance(image[0] , torch.Tensor ):
__lowerCAmelCase = torch.cat(lowercase , dim=0 )
return image
def _lowerCAmelCase ( lowercase ) -> Union[str, Any]:
if isinstance(lowercase , torch.Tensor ):
return mask
elif isinstance(lowercase , PIL.Image.Image ):
__lowerCAmelCase = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = mask[0].size
__lowerCAmelCase , __lowerCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__lowerCAmelCase = [np.array(m.convert("""L""" ).resize((w, h) , resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask]
__lowerCAmelCase = np.concatenate(lowercase , axis=0 )
__lowerCAmelCase = mask.astype(np.floataa ) / 2_55.0
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = torch.from_numpy(lowercase )
elif isinstance(mask[0] , torch.Tensor ):
__lowerCAmelCase = torch.cat(lowercase , dim=0 )
return mask
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : UNetaDModel
a : RePaintScheduler
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE,scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 2_50,__SCREAMING_SNAKE_CASE = 0.0,__SCREAMING_SNAKE_CASE = 10,__SCREAMING_SNAKE_CASE = 10,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = "pil",__SCREAMING_SNAKE_CASE = True,):
'''simple docstring'''
__lowerCAmelCase = image
__lowerCAmelCase = _preprocess_image(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = original_image.to(device=self.device,dtype=self.unet.dtype )
__lowerCAmelCase = _preprocess_mask(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = mask_image.to(device=self.device,dtype=self.unet.dtype )
__lowerCAmelCase = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(__SCREAMING_SNAKE_CASE )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
__lowerCAmelCase = original_image.shape
__lowerCAmelCase = randn_tensor(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,device=self.device,dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,self.device )
__lowerCAmelCase = eta
__lowerCAmelCase = self.scheduler.timesteps[0] + 1
__lowerCAmelCase = generator[0] if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__lowerCAmelCase = self.unet(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ).sample
# compute previous image: x_t -> x_t-1
__lowerCAmelCase = self.scheduler.step(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__lowerCAmelCase = self.scheduler.undo_step(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = t
__lowerCAmelCase = (image / 2 + 0.5).clamp(0,1 )
__lowerCAmelCase = image.cpu().permute(0,2,3,1 ).numpy()
if output_type == "pil":
__lowerCAmelCase = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
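# Illustrative sketch (added note): assuming this pipeline mirrors diffusers' `RePaintPipeline`; the
# checkpoint name is only an example, and the mask follows the convention of the preprocessing above
# (1 = keep the original pixel, 0 = repaint it).
if __name__ == "__main__":
    from diffusers import RePaintPipeline

    pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
    original = torch.rand(1, 3, 256, 256) * 2 - 1  # stand-in for an image already scaled to [-1, 1]
    mask = torch.ones(1, 1, 256, 256)
    mask[:, :, :, 128:] = 0  # repaint the right half
    result = pipe(image=original, mask_image=mask, num_inference_steps=50, jump_length=5, jump_n_sample=5)
    result.images[0].save("repaint_sample.png")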
| 689 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : Optional[int] =TextToVideoSDPipeline
a : Optional[int] =TEXT_TO_IMAGE_PARAMS
a : Any =TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
a : Union[str, Any] =frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""),up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""),cross_attention_dim=32,attention_head_dim=4,)
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,)
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,sample_size=1_28,)
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,hidden_act="""gelu""",projection_dim=5_12,)
__lowerCAmelCase = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = TextToVideoSDPipeline(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """np"""
__lowerCAmelCase = sd_pipe(**__SCREAMING_SNAKE_CASE ).frames
__lowerCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
__lowerCAmelCase = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",)
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
__lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowerCAmelCase = pipe.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=25,output_type="""pt""" ).frames
__lowerCAmelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
__lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__lowerCAmelCase = pipe.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=2,output_type="""pt""" ).frames
__lowerCAmelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 689 | 1 |
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
_a : List[str] = getLogger(__name__)
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase = 8 , lowercase = 1024 , lowercase="val" , lowercase=None , lowercase=False , lowercase="summarization" , lowercase=None , lowercase=1 , lowercase = None , lowercase="" , **lowercase , ) -> Dict:
__lowerCAmelCase = str(lowercase )
assert local_rank is not None
torch.distributed.init_process_group(backend="""nccl""" , rank=lowercase )
__lowerCAmelCase = Path(lowercase )
__lowerCAmelCase = save_dir.joinpath(f'rank_{local_rank}_output.json' )
torch.cuda.set_device(lowercase )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(lowercase ).cuda()
if fpaa:
__lowerCAmelCase = model.half()
# determine if we need to increase num_beams
use_task_specific_params(lowercase , lowercase ) # update config with task specific params
__lowerCAmelCase = generate_kwargs.pop("""num_beams""" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
__lowerCAmelCase = num_return_sequences
__lowerCAmelCase = AutoTokenizer.from_pretrained(lowercase )
logger.info(f'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type.
if max_source_length is None:
__lowerCAmelCase = tokenizer.model_max_length
if prefix is None:
__lowerCAmelCase = prefix or getattr(model.config , """prefix""" , """""" ) or """"""
__lowerCAmelCase = SeqaSeqDataset(
lowercase , lowercase , lowercase , max_target_length=1024 , type_path=lowercase , n_obs=lowercase , prefix=lowercase , **lowercase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
__lowerCAmelCase = ds.make_sortish_sampler(lowercase , distributed=lowercase , add_extra_examples=lowercase , shuffle=lowercase )
__lowerCAmelCase = DataLoader(lowercase , sampler=lowercase , batch_size=lowercase , collate_fn=ds.collate_fn )
__lowerCAmelCase = []
for batch in tqdm(lowercase ):
__lowerCAmelCase = model.generate(
input_ids=batch["""input_ids"""].to(model.device ) , attention_mask=batch["""attention_mask"""].to(model.device ) , num_return_sequences=lowercase , num_beams=lowercase , **lowercase , )
__lowerCAmelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
__lowerCAmelCase = batch["""ids"""]
if num_return_sequences > 1:
__lowerCAmelCase = chunks(lowercase , lowercase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(lowercase ):
results.append({"""pred""": pred, """id""": ids[i].item()} )
save_json(lowercase , lowercase )
return results, sampler.num_replicas
def _lowerCAmelCase ( ) -> List[Any]:
__lowerCAmelCase = argparse.ArgumentParser(
epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" )
parser.add_argument("""--data_dir""" , type=lowercase , help="""like cnn_dm/test.source""" )
parser.add_argument(
"""--model_name""" , type=lowercase , help="""like facebook/bart-large-cnn,t5-base, etc.""" , default="""sshleifer/distilbart-xsum-12-3""" , )
parser.add_argument("""--save_dir""" , type=lowercase , help="""where to save""" , default="""tmp_gen""" )
parser.add_argument("""--max_source_length""" , type=lowercase , default=lowercase )
parser.add_argument(
"""--type_path""" , type=lowercase , default="""test""" , help="""which subset to evaluate typically train/val/test""" )
parser.add_argument("""--task""" , type=lowercase , default="""summarization""" , help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" , type=lowercase , default=8 , required=lowercase , help="""batch size""" )
parser.add_argument(
"""--local_rank""" , type=lowercase , default=-1 , required=lowercase , help="""should be passed by distributed.launch""" )
parser.add_argument(
"""--n_obs""" , type=lowercase , default=lowercase , required=lowercase , help="""How many observations. Defaults to all.""" )
parser.add_argument(
"""--num_return_sequences""" , type=lowercase , default=1 , required=lowercase , help="""How many sequences to return""" )
parser.add_argument(
"""--sync_timeout""" , type=lowercase , default=600 , required=lowercase , help="""How long should master process wait for other processes to finish.""" , )
parser.add_argument("""--src_lang""" , type=lowercase , default=lowercase , required=lowercase )
parser.add_argument("""--tgt_lang""" , type=lowercase , default=lowercase , required=lowercase )
parser.add_argument(
"""--prefix""" , type=lowercase , required=lowercase , default=lowercase , help="""will be added to the begininng of src examples""" )
parser.add_argument("""--fp16""" , action="""store_true""" )
parser.add_argument("""--debug""" , action="""store_true""" )
__lowerCAmelCase = time.time()
__lowerCAmelCase , __lowerCAmelCase = parser.parse_known_args()
__lowerCAmelCase = parse_numeric_n_bool_cl_kwargs(lowercase )
if generate_kwargs and args.local_rank <= 0:
print(f'parsed the following generate kwargs: {generate_kwargs}' )
__lowerCAmelCase = Path(args.save_dir + """_tmp""" )
Path(lowercase ).mkdir(exist_ok=lowercase ) # this handles locking.
__lowerCAmelCase = list(json_save_dir.glob("""rank_*.json""" ) )
if intermediate_files:
raise ValueError(f'Found files at {json_save_dir} please move or remove them.' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
__lowerCAmelCase = {}
if args.src_lang is not None:
__lowerCAmelCase = args.src_lang
if args.tgt_lang is not None:
__lowerCAmelCase = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowercase )
__lowerCAmelCase , __lowerCAmelCase = eval_data_dir(
args.data_dir , lowercase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=lowercase , **lowercase , )
if args.local_rank <= 0:
__lowerCAmelCase = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowercase )
__lowerCAmelCase = gather_results_from_each_node(lowercase , lowercase , args.sync_timeout )
__lowerCAmelCase = combine_partial_results(lowercase )
if args.num_return_sequences > 1:
__lowerCAmelCase = save_dir.joinpath("""pseudolabel_results.json""" )
print(f'Saving aggregated results at {save_path}, intermediate in {json_save_dir}/' )
save_json(lowercase , lowercase )
return
__lowerCAmelCase = Path(args.data_dir ).joinpath(args.type_path + """.target""" )
with open(lowercase ) as f:
__lowerCAmelCase = [x.rstrip() for x in f.readlines()][: len(lowercase )]
# Calculate metrics, save metrics, and save _generations.txt
__lowerCAmelCase = """translation""" in args.task
__lowerCAmelCase = calculate_bleu if calc_bleu else calculate_rouge
__lowerCAmelCase = """bleu""" if calc_bleu else """rouge"""
__lowerCAmelCase = score_fn(lowercase , lowercase )
__lowerCAmelCase = len(lowercase )
__lowerCAmelCase = time.time() - start_time
__lowerCAmelCase = round(runtime / metrics["""n_obs"""] , 4 )
__lowerCAmelCase = num_replicas
# TODO(@stas00): add whatever metadata to metrics
__lowerCAmelCase = save_dir.joinpath(f'{args.type_path}_{metric_name}.json' )
save_json(lowercase , lowercase , indent=lowercase )
print(lowercase )
write_txt_file(lowercase , save_dir.joinpath(f'{args.type_path}_generations.txt' ) )
if args.debug:
write_txt_file(lowercase , save_dir.joinpath(f'{args.type_path}.target' ) )
else:
shutil.rmtree(lowercase )
def _lowerCAmelCase ( lowercase ) -> List:
__lowerCAmelCase = []
for partial_result in partial_results:
records.extend(lowercase )
    __lowerCAmelCase = sorted(lowercase , key=lambda x : x["id"] )
__lowerCAmelCase = [x["""pred"""] for x in records]
return preds
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> List[Dict[str, List]]:
# WAIT FOR lots of .json files
__lowerCAmelCase = time.time()
logger.info("""waiting for all nodes to finish""" )
__lowerCAmelCase = None
while (time.time() - start_wait) < timeout:
__lowerCAmelCase = list(save_dir.glob("""rank_*.json""" ) )
if len(lowercase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
__lowerCAmelCase = lmap(lowercase , lowercase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("""Rank 0 gave up on waiting for other processes""" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
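# Illustrative, standalone sketch (not part of the script above): when
# --num_return_sequences > 1, eval_data_dir regroups the flat list of decoded
# predictions into per-example groups via the `chunks` helper it calls (not shown
# in this file excerpt); `_chunks_sketch` below is a hypothetical stand-in shown
# only to make that regrouping concrete.
def _chunks_sketch(seq, n):
    # split `seq` into consecutive chunks of length `n`
    return [seq[i : i + n] for i in range(0, len(seq), n)]
# 3 examples with 2 returned sequences each -> 3 chunks of size 2
assert _chunks_sketch(["a1", "a2", "b1", "b2", "c1", "c2"], 2) == [["a1", "a2"], ["b1", "b2"], ["c1", "c2"]]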
| 689 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _lowerCAmelCase ( lowercase ) -> Optional[int]:
if not is_accelerate_available():
return method
__lowerCAmelCase = version.parse(accelerate.__version__ ).base_version
if version.parse(lowercase ) < version.parse("""0.17.0""" ):
return method
def wrapper(self , *lowercase , **lowercase ):
if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
self._hf_hook.pre_forward(self )
return method(self , *lowercase , **lowercase )
return wrapper
| 689 | 1 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def _lowerCAmelCase ( lowercase = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def _lowerCAmelCase ( lowercase = "" ) -> bool:
if len(lowercase ) == 0:
return True
__lowerCAmelCase = input_str.replace(""" """ , """""" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__lowerCAmelCase = {}
for character in lower_case_input_str:
__lowerCAmelCase = character_freq_dict.get(lowercase , 0 ) + 1
__lowerCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _lowerCAmelCase ( lowercase = "" ) -> None:
print("""\nFor string = """ , lowercase , """:""" )
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
_a : int = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
_a : Optional[int] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
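# Worked example (standalone) of the criterion used above: a string can be
# rearranged into a palindrome iff at most one character occurs an odd number
# of times, which is exactly what the Counter-based one-liner checks.
from collections import Counter
# "racecar": counts {r: 2, a: 2, c: 2, e: 1} -> one odd count -> rearrangeable
assert sum(count % 2 for count in Counter("racecar").values()) < 2
# "momdad": counts {m: 2, o: 1, d: 2, a: 1} -> two odd counts -> not rearrangeable
assert not sum(count % 2 for count in Counter("momdad").values()) < 2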
| 689 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
# load base model
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained(lowercase , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
__lowerCAmelCase = load_file(lowercase )
__lowerCAmelCase = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
__lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
__lowerCAmelCase = pipeline.text_encoder
else:
__lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
__lowerCAmelCase = pipeline.unet
# find the target layer
__lowerCAmelCase = layer_infos.pop(0 )
while len(lowercase ) > -1:
try:
__lowerCAmelCase = curr_layer.__getattr__(lowercase )
if len(lowercase ) > 0:
__lowerCAmelCase = layer_infos.pop(0 )
elif len(lowercase ) == 0:
break
except Exception:
if len(lowercase ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
__lowerCAmelCase = layer_infos.pop(0 )
__lowerCAmelCase = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(lowercase )
else:
pair_keys.append(lowercase )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
__lowerCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
__lowerCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase ).unsqueeze(2 ).unsqueeze(3 )
else:
__lowerCAmelCase = state_dict[pair_keys[0]].to(torch.floataa )
__lowerCAmelCase = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase )
# update visited list
for item in pair_keys:
visited.append(lowercase )
return pipeline
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
_a : Optional[int] = parser.parse_args()
_a : Dict = args.base_model_path
_a : Optional[Any] = args.checkpoint_path
_a : Union[str, Any] = args.dump_path
_a : Optional[int] = args.lora_prefix_unet
_a : int = args.lora_prefix_text_encoder
_a : str = args.alpha
_a : Any = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
_a : Tuple = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
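# Standalone sketch of the core merge performed above: for a linear layer the LoRA
# update is added directly into the base weight, W = W0 + alpha * (up @ down).
# The tensors below are toy stand-ins, not weights from any real checkpoint.
import torch
base_weight = torch.zeros(8, 8)
lora_up = torch.randn(8, 4)
lora_down = torch.randn(4, 8)
alpha = 0.75
merged = base_weight + alpha * torch.mm(lora_up, lora_down)
assert merged.shape == base_weight.shape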
| 689 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_a : Optional[Any] = 1_6
_a : Any = 3_2
def _lowerCAmelCase ( lowercase , lowercase = 16 ) -> str:
__lowerCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
__lowerCAmelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
__lowerCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__lowerCAmelCase = datasets.map(
lowercase , batched=lowercase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__lowerCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__lowerCAmelCase = 16
elif accelerator.mixed_precision != "no":
__lowerCAmelCase = 8
else:
__lowerCAmelCase = None
return tokenizer.pad(
lowercase , padding="""longest""" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="""pt""" , )
# Instantiate dataloaders.
__lowerCAmelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
__lowerCAmelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_a : int = mocked_dataloaders # noqa: F811
def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase ) == "1":
__lowerCAmelCase = 2
# Initialize accelerator
__lowerCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCAmelCase = config["""lr"""]
__lowerCAmelCase = int(config["""num_epochs"""] )
__lowerCAmelCase = int(config["""seed"""] )
__lowerCAmelCase = int(config["""batch_size"""] )
__lowerCAmelCase = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
__lowerCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__lowerCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
__lowerCAmelCase = MAX_GPU_BATCH_SIZE
set_seed(lowercase )
__lowerCAmelCase , __lowerCAmelCase = get_dataloaders(lowercase , lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
__lowerCAmelCase = AdamW(params=model.parameters() , lr=lowercase )
# Instantiate scheduler
__lowerCAmelCase = get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__lowerCAmelCase = model(**lowercase )
__lowerCAmelCase = outputs.loss
__lowerCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
__lowerCAmelCase = 0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCAmelCase = model(**lowercase )
__lowerCAmelCase = outputs.logits.argmax(dim=-1 )
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather((predictions, batch["""labels"""]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(lowercase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
__lowerCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowerCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=lowercase , references=lowercase , )
__lowerCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , lowercase )
def _lowerCAmelCase ( ) -> Optional[Any]:
__lowerCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase , default=lowercase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
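# Standalone sketch of the "New Code" truncation in the evaluation loop above:
# on the last eval batch of a distributed run, the gathered tensors can contain
# duplicated padding samples, so they are sliced down to exactly
# len(dataset) - samples_seen entries before being fed to the metric.
# Toy tensors only.
import torch
dataset_len = 5
samples_seen = 4                             # examples already counted from full batches
gathered_last_batch = torch.tensor([1, 0])   # gathered across processes, one duplicate
kept = gathered_last_batch[: dataset_len - samples_seen]
assert kept.shape[0] == 1                    # only the genuine final sample remains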
| 689 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def _lowerCAmelCase ( lowercase = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def _lowerCAmelCase ( lowercase = "" ) -> bool:
if len(lowercase ) == 0:
return True
__lowerCAmelCase = input_str.replace(""" """ , """""" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__lowerCAmelCase = {}
for character in lower_case_input_str:
__lowerCAmelCase = character_freq_dict.get(lowercase , 0 ) + 1
__lowerCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _lowerCAmelCase ( lowercase = "" ) -> None:
print("""\nFor string = """ , lowercase , """:""" )
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
_a : int = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
_a : Optional[int] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 689 | 1 |
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
a : Tuple =1
@register_to_config
def __init__( self,__SCREAMING_SNAKE_CASE = 10_00,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
self.set_timesteps(__SCREAMING_SNAKE_CASE )
# standard deviation of the initial noise distribution
__lowerCAmelCase = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__lowerCAmelCase = 4
# running values
__lowerCAmelCase = []
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
__lowerCAmelCase = num_inference_steps
__lowerCAmelCase = torch.linspace(1,0,num_inference_steps + 1 )[:-1]
__lowerCAmelCase = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__lowerCAmelCase = torch.tensor(self.config.trained_betas,dtype=torch.floataa )
else:
__lowerCAmelCase = torch.sin(steps * math.pi / 2 ) ** 2
__lowerCAmelCase = (1.0 - self.betas**2) ** 0.5
__lowerCAmelCase = (torch.atana(self.betas,self.alphas ) / math.pi * 2)[:-1]
__lowerCAmelCase = timesteps.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = []
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = True,):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__lowerCAmelCase = (self.timesteps == timestep).nonzero().item()
__lowerCAmelCase = timestep_index + 1
__lowerCAmelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__SCREAMING_SNAKE_CASE )
if len(self.ets ) == 1:
__lowerCAmelCase = self.ets[-1]
elif len(self.ets ) == 2:
__lowerCAmelCase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__lowerCAmelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__lowerCAmelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__lowerCAmelCase = self._get_prev_sample(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return sample
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.alphas[timestep_index]
__lowerCAmelCase = self.betas[timestep_index]
__lowerCAmelCase = self.alphas[prev_timestep_index]
__lowerCAmelCase = self.betas[prev_timestep_index]
__lowerCAmelCase = (sample - sigma * ets) / max(__SCREAMING_SNAKE_CASE,1e-8 )
__lowerCAmelCase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
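# Standalone sketch of the multistep blending in `step` above: once four previous
# model outputs are stored in `self.ets`, they are combined with the classic
# fourth-order Adams-Bashforth coefficients (55, -59, 37, -9) / 24. Toy tensors only.
import torch
ets = [torch.full((2,), float(v)) for v in (1.0, 2.0, 3.0, 4.0)]  # oldest -> newest
blended = (1 / 24) * (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4])
# (55*4 - 59*3 + 37*2 - 9*1) / 24 = 108 / 24 = 4.5
assert torch.allclose(blended, torch.full((2,), 4.5))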
| 689 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _lowerCAmelCase ( lowercase ) -> List[Any]:
__lowerCAmelCase = VideoMAEConfig()
set_architecture_configs(lowercase , lowercase )
if "finetuned" not in model_name:
__lowerCAmelCase = False
if "finetuned" in model_name:
__lowerCAmelCase = """huggingface/label-files"""
if "kinetics" in model_name:
__lowerCAmelCase = 400
__lowerCAmelCase = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
__lowerCAmelCase = 174
__lowerCAmelCase = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
__lowerCAmelCase = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) )
__lowerCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
def _lowerCAmelCase ( lowercase , lowercase ) -> Any:
if "small" in model_name:
__lowerCAmelCase = 384
__lowerCAmelCase = 1536
__lowerCAmelCase = 12
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 3
__lowerCAmelCase = 192
__lowerCAmelCase = 768
elif "large" in model_name:
__lowerCAmelCase = 1024
__lowerCAmelCase = 4096
__lowerCAmelCase = 24
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 8
__lowerCAmelCase = 512
__lowerCAmelCase = 2048
elif "huge" in model_name:
__lowerCAmelCase = 1280
__lowerCAmelCase = 5120
__lowerCAmelCase = 32
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 8
__lowerCAmelCase = 640
__lowerCAmelCase = 2560
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def _lowerCAmelCase ( lowercase ) -> List[str]:
if "encoder." in name:
__lowerCAmelCase = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
__lowerCAmelCase = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
__lowerCAmelCase = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
__lowerCAmelCase = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowerCAmelCase = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__lowerCAmelCase = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
__lowerCAmelCase = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
__lowerCAmelCase = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
__lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
__lowerCAmelCase = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
__lowerCAmelCase = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
__lowerCAmelCase = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
__lowerCAmelCase = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
__lowerCAmelCase = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__lowerCAmelCase = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__lowerCAmelCase = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
__lowerCAmelCase = name.replace("""head""" , """classifier""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]:
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(lowercase )
if key.startswith("""encoder.""" ):
__lowerCAmelCase = key.replace("""encoder.""" , """""" )
if "qkv" in key:
__lowerCAmelCase = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
__lowerCAmelCase = config.decoder_hidden_size
__lowerCAmelCase = int(key_split[2] )
__lowerCAmelCase = """decoder.decoder_layers."""
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = config.hidden_size
__lowerCAmelCase = int(key_split[1] )
__lowerCAmelCase = """videomae.encoder.layer."""
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val
return orig_state_dict
def _lowerCAmelCase ( ) -> str:
__lowerCAmelCase = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__lowerCAmelCase = np.load(lowercase )
return list(lowercase )
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = get_videomae_config(lowercase )
if "finetuned" in model_name:
__lowerCAmelCase = VideoMAEForVideoClassification(lowercase )
else:
__lowerCAmelCase = VideoMAEForPreTraining(lowercase )
# download original checkpoint, hosted on Google Drive
__lowerCAmelCase = """pytorch_model.bin"""
gdown.cached_download(lowercase , lowercase , quiet=lowercase )
__lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )
if "model" in files:
__lowerCAmelCase = files["""model"""]
else:
__lowerCAmelCase = files["""module"""]
__lowerCAmelCase = convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
# verify model on basic input
__lowerCAmelCase = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__lowerCAmelCase = prepare_video()
__lowerCAmelCase = image_processor(lowercase , return_tensors="""pt""" )
if "finetuned" not in model_name:
__lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
__lowerCAmelCase = torch.load(lowercase )
__lowerCAmelCase = model(**lowercase )
__lowerCAmelCase = outputs.logits
__lowerCAmelCase = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
__lowerCAmelCase = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(f'Model name not supported. Should be one of {model_names}' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , lowercase , atol=1e-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__lowerCAmelCase = outputs.loss
assert torch.allclose(lowercase , lowercase , atol=1e-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase )
model.save_pretrained(lowercase )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase , organization="""nielsr""" )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_a : int = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
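# Standalone sketch of the qkv splitting in convert_state_dict above: a fused
# (3 * dim, dim) in-projection weight is cut into equal query / key / value blocks
# of `dim` rows each, exactly like val[:dim], val[dim : dim * 2] and val[-dim:].
import torch
dim = 4
fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)
assert torch.equal(torch.cat([q, k, v], dim=0), fused)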
| 689 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : Union[str, Any] = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def _lowerCAmelCase ( lowercase , lowercase , lowercase=8 ) -> Optional[int]:
__lowerCAmelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__lowerCAmelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=__SCREAMING_SNAKE_CASE,scheduler=__SCREAMING_SNAKE_CASE,movq=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if latents is None:
__lowerCAmelCase = randn_tensor(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,device=__SCREAMING_SNAKE_CASE,dtype=__SCREAMING_SNAKE_CASE )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
__lowerCAmelCase = latents.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
__lowerCAmelCase = torch.device(f'cuda:{gpu_id}' )
__lowerCAmelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(""">=""","""0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
__lowerCAmelCase = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""",silence_dtype_warnings=__SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCAmelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
__lowerCAmelCase , __lowerCAmelCase = cpu_offload_with_hook(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,prev_module_hook=__SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
__lowerCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self ):
'''simple docstring'''
if not hasattr(self.unet,"""_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__SCREAMING_SNAKE_CASE,"""_hf_hook""" )
and hasattr(module._hf_hook,"""execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__SCREAMING_SNAKE_CASE )
def __call__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 5_12,__SCREAMING_SNAKE_CASE = 5_12,__SCREAMING_SNAKE_CASE = 1_00,__SCREAMING_SNAKE_CASE = 4.0,__SCREAMING_SNAKE_CASE = 1,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = "pil",__SCREAMING_SNAKE_CASE = True,):
'''simple docstring'''
__lowerCAmelCase = self._execution_device
__lowerCAmelCase = guidance_scale > 1.0
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = torch.cat(__SCREAMING_SNAKE_CASE,dim=0 )
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = torch.cat(__SCREAMING_SNAKE_CASE,dim=0 )
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = torch.cat(__SCREAMING_SNAKE_CASE,dim=0 )
__lowerCAmelCase = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__lowerCAmelCase = image_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE,dim=0 )
__lowerCAmelCase = negative_image_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE,dim=0 )
__lowerCAmelCase = hint.repeat_interleave(__SCREAMING_SNAKE_CASE,dim=0 )
__lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds],dim=0 ).to(dtype=self.unet.dtype,device=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.cat([hint, hint],dim=0 ).to(dtype=self.unet.dtype,device=__SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE,device=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.scheduler.timesteps
__lowerCAmelCase = self.movq.config.latent_channels
__lowerCAmelCase , __lowerCAmelCase = downscale_height_and_width(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,self.movq_scale_factor )
# create initial latent
__lowerCAmelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width),image_embeds.dtype,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,self.scheduler,)
for i, t in enumerate(self.progress_bar(__SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
__lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCAmelCase = {"""image_embeds""": image_embeds, """hint""": hint}
__lowerCAmelCase = self.unet(
sample=__SCREAMING_SNAKE_CASE,timestep=__SCREAMING_SNAKE_CASE,encoder_hidden_states=__SCREAMING_SNAKE_CASE,added_cond_kwargs=__SCREAMING_SNAKE_CASE,return_dict=__SCREAMING_SNAKE_CASE,)[0]
if do_classifier_free_guidance:
__lowerCAmelCase , __lowerCAmelCase = noise_pred.split(latents.shape[1],dim=1 )
__lowerCAmelCase , __lowerCAmelCase = noise_pred.chunk(2 )
__lowerCAmelCase , __lowerCAmelCase = variance_pred.chunk(2 )
__lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCAmelCase = torch.cat([noise_pred, variance_pred_text],dim=1 )
if not (
hasattr(self.scheduler.config,"""variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCAmelCase , __lowerCAmelCase = noise_pred.split(latents.shape[1],dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCAmelCase = self.scheduler.step(
__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,)[0]
# post-processing
__lowerCAmelCase = self.movq.decode(__SCREAMING_SNAKE_CASE,force_not_quantize=__SCREAMING_SNAKE_CASE )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}' )
if output_type in ["np", "pil"]:
__lowerCAmelCase = image * 0.5 + 0.5
__lowerCAmelCase = image.clamp(0,1 )
__lowerCAmelCase = image.cpu().permute(0,2,3,1 ).float().numpy()
if output_type == "pil":
__lowerCAmelCase = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
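# Standalone sketch of the classifier-free guidance step above: the unconditional
# and hint/image-conditioned noise predictions are blended with the guidance scale,
# noise = uncond + guidance_scale * (cond - uncond). Toy tensors only.
import torch
noise_pred_uncond = torch.zeros(1, 4)
noise_pred_text = torch.ones(1, 4)
guidance_scale = 4.0
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert torch.allclose(guided, torch.full((1, 4), 4.0))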
| 689 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_a : Tuple = """\
"""
_a : Tuple = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_a : Optional[Any] = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ),reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 16,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either cpu, cuda, or gpu."
if device == "gpu":
__lowerCAmelCase = """cuda"""
else:
__lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__SCREAMING_SNAKE_CASE ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__lowerCAmelCase = model.config.max_length - 1
else:
__lowerCAmelCase = model.config.max_length
__lowerCAmelCase = tokenizer(
__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,max_length=__SCREAMING_SNAKE_CASE,return_tensors="""pt""",return_attention_mask=__SCREAMING_SNAKE_CASE,).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = encodings["""input_ids"""]
__lowerCAmelCase = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ),1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ),2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__lowerCAmelCase = []
__lowerCAmelCase = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0,len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase = min(start_index + batch_size,len(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = encoded_texts[start_index:end_index]
__lowerCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
__lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch],dim=1 )
__lowerCAmelCase = torch.cat(
[torch.ones(bos_tokens_tensor.size(),dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask],dim=1 )
__lowerCAmelCase = encoded_batch
with torch.no_grad():
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE ).logits
__lowerCAmelCase = out_logits[..., :-1, :].contiguous()
__lowerCAmelCase = labels[..., 1:].contiguous()
__lowerCAmelCase = attn_mask[..., 1:].contiguous()
__lowerCAmelCase = torch.expa(
(loss_fct(shift_logits.transpose(1,2 ),__SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
| 689 | 1 |
'''simple docstring'''
from math import factorial
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> float:
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(lowercase , lowercase ) or not isinstance(lowercase , lowercase ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
__lowerCAmelCase = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
__lowerCAmelCase = float(factorial(lowercase ) )
coefficient /= factorial(lowercase ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.75))
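# Worked check (standalone) of the formula above: P(X = 2) for 4 trials with
# p = 0.75 is C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.
from math import comb
assert abs(comb(4, 2) * 0.75**2 * 0.25**2 - 0.2109375) < 1e-12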
| 689 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Union[str, Any] =["""image_processor"""]
a : Dict ="""SamImageProcessor"""
def __init__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.image_processor
__lowerCAmelCase = -10
__lowerCAmelCase = self.image_processor.size["""longest_edge"""]
def __call__( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = self.image_processor(
__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
        # pop arguments that are not used in the forward but used nevertheless
__lowerCAmelCase = encoding_image_processor["""original_sizes"""]
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks if Torch or TF tensor
__lowerCAmelCase = original_sizes.numpy()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._check_and_preprocess_points(
input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = self._normalize_and_convert(
__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,)
return encoding_image_processor
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="pt",):
'''simple docstring'''
if input_points is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0] ) for point in input_points
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
for point, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__lowerCAmelCase , __lowerCAmelCase = self._pad_points_and_labels(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_labels is not None:
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0],is_bounding_box=__SCREAMING_SNAKE_CASE )
for box in input_boxes
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,is_bounding_box=__SCREAMING_SNAKE_CASE )
for box, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
]
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
__lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = max([point.shape[0] for point in input_points] )
__lowerCAmelCase = []
for i, point in enumerate(__SCREAMING_SNAKE_CASE ):
if point.shape[0] != expected_nb_points:
__lowerCAmelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 )
__lowerCAmelCase = np.append(input_labels[i],[self.point_pad_value] )
processed_input_points.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = processed_input_points
return input_points, input_labels
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = original_size
__lowerCAmelCase , __lowerCAmelCase = self.image_processor._get_preprocess_shape(__SCREAMING_SNAKE_CASE,longest_edge=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = deepcopy(__SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE )
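        # rescale x by new_w / old_w and y by new_h / old_h so the coordinates land in the resized image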
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1,2,2 )
__lowerCAmelCase = coords[..., 0] * (new_w / old_w)
__lowerCAmelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1,4 )
return coords
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,):
'''simple docstring'''
if input_points is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks for TF or Torch tensor
__lowerCAmelCase = input_points.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0],__SCREAMING_SNAKE_CASE ):
raise ValueError("""Input points must be a list of list of floating points.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for input_point in input_points]
else:
__lowerCAmelCase = None
if input_labels is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
__lowerCAmelCase = input_labels.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0],__SCREAMING_SNAKE_CASE ):
                raise ValueError("""Input labels must be a list of lists of integers.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for label in input_labels]
else:
__lowerCAmelCase = None
if input_boxes is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
__lowerCAmelCase = input_boxes.numpy().tolist()
if (
not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0],__SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0][0],__SCREAMING_SNAKE_CASE )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) for box in input_boxes]
else:
__lowerCAmelCase = None
return input_points, input_labels, input_boxes
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(__SCREAMING_SNAKE_CASE ) )
def lowerCamelCase__ ( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.image_processor.post_process_masks(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
| 689 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase ) -> list:
__lowerCAmelCase = int(lowercase )
if n_element < 1:
__lowerCAmelCase = ValueError("""a should be a positive number""" )
raise my_error
__lowerCAmelCase = [1]
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = (0, 0, 0)
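    # i, j, k track the positions whose multiples of 2, 3 and 5 are the next candidates to append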
__lowerCAmelCase = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
_a : Optional[Any] = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
_a : int = hamming(int(n))
print("""-----------------------------------------------------""")
print(f'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 689 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
_a : int = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
a : Optional[str] =field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
a : Optional[str] =field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
a : int =field(
default=10_24 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
a : bool =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the training data."""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the validation data."""} )
a : Optional[str] =field(default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the test data."""} )
def lowerCamelCase__ ( self ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
__lowerCAmelCase = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__lowerCAmelCase = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class _UpperCAmelCase :
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a : str =field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def _lowerCAmelCase ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(lowercase )
datasets.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__lowerCAmelCase = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__lowerCAmelCase = data_args.train_file.split(""".""" )[-1]
__lowerCAmelCase = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__lowerCAmelCase = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
__lowerCAmelCase = load_dataset("""csv""" , data_files=lowercase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__lowerCAmelCase = load_dataset("""json""" , data_files=lowercase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__lowerCAmelCase = raw_datasets["""train"""].features["""label"""].names
__lowerCAmelCase = len(lowercase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__lowerCAmelCase = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase , )
__lowerCAmelCase = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__lowerCAmelCase = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__lowerCAmelCase = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__lowerCAmelCase = {"""Refused""": 0, """Entailed""": 1}
__lowerCAmelCase = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
__lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowercase ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowercase ):
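            # rows are separated by newlines and cells by '#'; the first row provides the column headers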
__lowerCAmelCase = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
__lowerCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__lowerCAmelCase = examples["""statement"""]
__lowerCAmelCase = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
__lowerCAmelCase = tokenizer(lowercase , lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase )
__lowerCAmelCase = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
__lowerCAmelCase = raw_datasets.map(
lowercase , batched=lowercase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__lowerCAmelCase = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__lowerCAmelCase = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__lowerCAmelCase = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__lowerCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
__lowerCAmelCase = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
__lowerCAmelCase = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowercase ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowercase ):
__lowerCAmelCase = p.predictions[0] if isinstance(p.predictions , lowercase ) else p.predictions
__lowerCAmelCase = np.argmax(lowercase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__lowerCAmelCase = default_data_collator
elif training_args.fpaa:
__lowerCAmelCase = DataCollatorWithPadding(lowercase , pad_to_multiple_of=8 )
else:
__lowerCAmelCase = None
# Initialize our Trainer
__lowerCAmelCase = Trainer(
model=lowercase , args=lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase , tokenizer=lowercase , data_collator=lowercase , )
# Training
if training_args.do_train:
__lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
__lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCAmelCase = last_checkpoint
__lowerCAmelCase = trainer.train(resume_from_checkpoint=lowercase )
__lowerCAmelCase = train_result.metrics
__lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase )
)
__lowerCAmelCase = min(lowercase , len(lowercase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , lowercase )
trainer.save_metrics("""train""" , lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowerCAmelCase = trainer.evaluate(eval_dataset=lowercase )
__lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase )
__lowerCAmelCase = min(lowercase , len(lowercase ) )
trainer.log_metrics("""eval""" , lowercase )
trainer.save_metrics("""eval""" , lowercase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
        # Remove the `label` column because it contains -1 and the Trainer won't like that.
__lowerCAmelCase = predict_dataset.remove_columns("""label""" )
__lowerCAmelCase = trainer.predict(lowercase , metric_key_prefix="""predict""" ).predictions
__lowerCAmelCase = np.argmax(lowercase , axis=1 )
__lowerCAmelCase = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(lowercase , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(lowercase ):
__lowerCAmelCase = label_list[item]
writer.write(f'{index}\t{item}\n' )
__lowerCAmelCase = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase )
else:
trainer.create_model_card(**lowercase )
def _lowerCAmelCase ( lowercase ) -> str:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 689 | 1 |
'''simple docstring'''
_a : Optional[Any] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def _lowerCAmelCase ( lowercase ) -> int:
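    # Dijkstra's two-stack algorithm: operands and operators go on separate stacks;
    # each ')' pops one operator and two operands, applies the operator and pushes the result back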
__lowerCAmelCase = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
__lowerCAmelCase = Stack()
__lowerCAmelCase = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowercase ) )
elif i in operators:
# RULE 2
operator_stack.push(lowercase )
elif i == ")":
# RULE 4
__lowerCAmelCase = operator_stack.peek()
operator_stack.pop()
__lowerCAmelCase = operand_stack.peek()
operand_stack.pop()
__lowerCAmelCase = operand_stack.peek()
operand_stack.pop()
__lowerCAmelCase = operators[opr](lowercase , lowercase )
operand_stack.push(lowercase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
_a : Optional[Any] = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(f'{equation} = {dijkstras_two_stack_algorithm(equation)}')
| 689 |
'''simple docstring'''
import os
import sys
import unittest
_a : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_a : Union[str, Any] = os.path.join(git_repo_path, """src""", """diffusers""")
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = find_backend(""" if not is_torch_available():""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__lowerCAmelCase = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__lowerCAmelCase = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers_and_onnx""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn("""torch""",__SCREAMING_SNAKE_CASE )
self.assertIn("""torch_and_transformers""",__SCREAMING_SNAKE_CASE )
self.assertIn("""flax_and_transformers""",__SCREAMING_SNAKE_CASE )
self.assertIn("""torch_and_transformers_and_onnx""",__SCREAMING_SNAKE_CASE )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""",objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""",objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""",objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""",objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""",objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""",objects["""torch_and_transformers_and_onnx"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = create_dummy_object("""CONSTANT""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""\nCONSTANT = None\n""" )
__lowerCAmelCase = create_dummy_object("""function""","""'torch'""" )
self.assertEqual(
__SCREAMING_SNAKE_CASE,"""\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
__lowerCAmelCase = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
__lowerCAmelCase = create_dummy_object("""FakeClass""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
__lowerCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""],__SCREAMING_SNAKE_CASE )
| 689 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a : Tuple = logging.get_logger(__name__)
_a : Tuple = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
a : Dict ="""nat"""
a : int ={
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self,__SCREAMING_SNAKE_CASE=4,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE=[3, 4, 6, 5],__SCREAMING_SNAKE_CASE=[2, 4, 8, 16],__SCREAMING_SNAKE_CASE=7,__SCREAMING_SNAKE_CASE=3.0,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE="gelu",__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = embed_dim
__lowerCAmelCase = depths
__lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = num_heads
__lowerCAmelCase = kernel_size
__lowerCAmelCase = mlp_ratio
__lowerCAmelCase = qkv_bias
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = hidden_act
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCAmelCase = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__lowerCAmelCase = layer_scale_init_value
__lowerCAmelCase = ["""stem"""] + [f'stage{idx}' for idx in range(1,len(__SCREAMING_SNAKE_CASE ) + 1 )]
__lowerCAmelCase , __lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE,out_indices=__SCREAMING_SNAKE_CASE,stage_names=self.stage_names )
| 689 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase ) -> tuple[int, int]:
try:
__lowerCAmelCase = float(lowercase )
except ValueError:
raise ValueError("""Please enter a valid number""" )
__lowerCAmelCase = decimal - int(lowercase )
if fractional_part == 0:
return int(lowercase ), 1
else:
__lowerCAmelCase = len(str(lowercase ).split(""".""" )[1] )
__lowerCAmelCase = int(decimal * (10**number_of_frac_digits) )
__lowerCAmelCase = 10**number_of_frac_digits
__lowerCAmelCase , __lowerCAmelCase = denominator, numerator
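        # Euclidean algorithm: loop until the remainder is zero, leaving the GCD in `divisor`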
while True:
__lowerCAmelCase = dividend % divisor
if remainder == 0:
break
__lowerCAmelCase , __lowerCAmelCase = divisor, remainder
__lowerCAmelCase , __lowerCAmelCase = numerator / divisor, denominator / divisor
return int(lowercase ), int(lowercase )
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
| 689 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = 0
__lowerCAmelCase = len(lowercase ) - 1
while left <= right:
        # avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
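        # estimate the probe index by linearly interpolating the target value between the boundary values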
__lowerCAmelCase = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowercase ):
return None
__lowerCAmelCase = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
__lowerCAmelCase = left
__lowerCAmelCase = point
elif point > right:
__lowerCAmelCase = right
__lowerCAmelCase = point
else:
if item < current_item:
__lowerCAmelCase = point - 1
else:
__lowerCAmelCase = point + 1
return None
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase ) -> str:
    # avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
__lowerCAmelCase = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowercase ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(lowercase , lowercase , lowercase , lowercase )
elif point > right:
return interpolation_search_by_recursion(lowercase , lowercase , lowercase , lowercase )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
lowercase , lowercase , lowercase , point - 1 )
else:
return interpolation_search_by_recursion(
lowercase , lowercase , point + 1 , lowercase )
def _lowerCAmelCase ( lowercase ) -> List[str]:
if collection != sorted(lowercase ):
raise ValueError("""Collection must be ascending sorted""" )
return True
if __name__ == "__main__":
import sys
_a : Dict = 0
if debug == 1:
_a : Optional[Any] = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
_a : str = 6_7
_a : Optional[int] = interpolation_search(collection, target)
if result is not None:
print(f'{target} found at positions: {result}')
else:
print("""Not found""")
| 689 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_a : Dict = _symbol_database.Default()
_a : Union[str, Any] = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_a : str = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_a : str = None
_a : Union[str, Any] = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_a : Optional[int] = 4_5
_a : List[Any] = 1_5_8_1
_a : str = 1_5_1_7
_a : Optional[Any] = 1_5_7_0
_a : List[str] = 1_5_8_4
_a : List[Any] = 1_7_9_3
_a : Union[str, Any] = 1_7_9_5
_a : Tuple = 1_9_1_6
_a : List[Any] = 1_8_6_4
_a : Any = 1_9_0_5
_a : Optional[Any] = 1_9_1_9
_a : Optional[int] = 2_4_2_9
_a : Tuple = 2_2_0_8
_a : Optional[Any] = 2_4_1_8
_a : List[Any] = 2_3_2_3
_a : str = 2_4_0_7
# @@protoc_insertion_point(module_scope)
| 689 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase ) -> int:
__lowerCAmelCase = [[0 for _ in range(lowercase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__lowerCAmelCase = 1
for n in range(m + 1 ):
for k in range(1 , lowercase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
_a : Tuple = int(input("""Enter a number: """).strip())
print(partition(n))
except ValueError:
print("""Please enter a number.""")
else:
try:
_a : List[Any] = int(sys.argv[1])
print(partition(n))
except ValueError:
print("""Please pass a number.""")
| 689 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : torch.FloatTensor
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE=True,):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = torch.nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[0],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
# down
__lowerCAmelCase = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_down_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,add_downsample=not is_final_block,resnet_eps=1e-6,downsample_padding=0,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""",attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# out
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = 2 * out_channels if double_z else out_channels
__lowerCAmelCase = nn.Convad(block_out_channels[-1],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = x
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""","""1.11.0""" ):
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
__lowerCAmelCase = down_block(__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE="group",):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[-1],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
__lowerCAmelCase = in_channels if norm_type == """spatial""" else None
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type,attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# up
__lowerCAmelCase = list(reversed(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = reversed_block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_up_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block + 1,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,prev_output_channel=__SCREAMING_SNAKE_CASE,add_upsample=not is_final_block,resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,resnet_time_scale_shift=__SCREAMING_SNAKE_CASE,)
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = output_channel
# out
if norm_type == "spatial":
__lowerCAmelCase = SpatialNorm(block_out_channels[0],__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = nn.Convad(block_out_channels[0],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
__lowerCAmelCase = z
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""","""1.11.0""" ):
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = up_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="random",__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = n_e
__lowerCAmelCase = vq_embed_dim
__lowerCAmelCase = beta
__lowerCAmelCase = legacy
__lowerCAmelCase = nn.Embedding(self.n_e,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e,1.0 / self.n_e )
__lowerCAmelCase = remap
if self.remap is not None:
self.register_buffer("""used""",torch.tensor(np.load(self.remap ) ) )
__lowerCAmelCase = self.used.shape[0]
__lowerCAmelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowerCAmelCase = self.re_embed
__lowerCAmelCase = self.re_embed + 1
print(
f'Remapping {self.n_e} indices to {self.re_embed} indices. '
f'Using {self.unknown_index} for unknown indices.' )
else:
__lowerCAmelCase = n_e
__lowerCAmelCase = sane_index_shape
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
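        # map indices from the full codebook onto the reduced `used` set; entries with no match get `unknown_index`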
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = (inds[:, :, None] == used[None, None, ...]).long()
__lowerCAmelCase = match.argmax(-1 )
__lowerCAmelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
__lowerCAmelCase = torch.randint(0,self.re_embed,size=new[unknown].shape ).to(device=new.device )
else:
__lowerCAmelCase = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
__lowerCAmelCase = 0 # simply set to zero
__lowerCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :],1,__SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = z.permute(0,2,3,1 ).contiguous()
__lowerCAmelCase = z.view(-1,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__lowerCAmelCase = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE,self.embedding.weight ),dim=1 )
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
__lowerCAmelCase = None
__lowerCAmelCase = None
# compute loss for embedding
if not self.legacy:
__lowerCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__lowerCAmelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__lowerCAmelCase = z + (z_q - z).detach()
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
if self.remap is not None:
__lowerCAmelCase = min_encoding_indices.reshape(z.shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.remap_to_used(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = min_encoding_indices.reshape(-1,1 ) # flatten
if self.sane_index_shape:
__lowerCAmelCase = min_encoding_indices.reshape(z_q.shape[0],z_q.shape[2],z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if self.remap is not None:
__lowerCAmelCase = indices.reshape(shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
__lowerCAmelCase = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
return z_q
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = parameters
__lowerCAmelCase , __lowerCAmelCase = torch.chunk(__SCREAMING_SNAKE_CASE,2,dim=1 )
__lowerCAmelCase = torch.clamp(self.logvar,-30.0,20.0 )
__lowerCAmelCase = deterministic
__lowerCAmelCase = torch.exp(0.5 * self.logvar )
__lowerCAmelCase = torch.exp(self.logvar )
if self.deterministic:
__lowerCAmelCase = __lowerCAmelCase = torch.zeros_like(
self.mean,device=self.parameters.device,dtype=self.parameters.dtype )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
__lowerCAmelCase = randn_tensor(
self.mean.shape,generator=__SCREAMING_SNAKE_CASE,device=self.parameters.device,dtype=self.parameters.dtype )
__lowerCAmelCase = self.mean + self.std * sample
return x
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
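        # KL divergence of this diagonal Gaussian from N(0, I), or from `other` when it is provided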
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean,2 ) + self.var - 1.0 - self.logvar,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar,dim=[1, 2, 3],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
__lowerCAmelCase = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean,2 ) / self.var,dim=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.mean
| 689 | 1 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_a : str = logging.getLogger(__name__)
def _lowerCAmelCase ( lowercase , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = False , ) -> str:
__lowerCAmelCase = bnb_quantization_config.load_in_abit
__lowerCAmelCase = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
__lowerCAmelCase = []
# custom device map
if isinstance(lowercase , lowercase ) and len(device_map.keys() ) > 1:
__lowerCAmelCase = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
__lowerCAmelCase = get_keys_to_not_convert(lowercase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(lowercase )
__lowerCAmelCase = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
__lowerCAmelCase = []
__lowerCAmelCase = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(lowercase )
# compatibility with peft
__lowerCAmelCase = load_in_abit
__lowerCAmelCase = load_in_abit
__lowerCAmelCase = get_parameter_device(lowercase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
__lowerCAmelCase = replace_with_bnb_layers(lowercase , lowercase , modules_to_not_convert=lowercase )
# convert param to the right dtype
__lowerCAmelCase = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
__lowerCAmelCase = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
__lowerCAmelCase = getattr(lowercase , lowercase , lowercase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(lowercase ):
param.to(lowercase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'The model device type is {model_device.type}. However, cuda is needed for quantization.'
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
with init_empty_weights():
__lowerCAmelCase = replace_with_bnb_layers(
lowercase , lowercase , modules_to_not_convert=lowercase )
__lowerCAmelCase = get_quantized_model_device_map(
lowercase , lowercase , lowercase , max_memory=lowercase , no_split_module_classes=lowercase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
__lowerCAmelCase = True
__lowerCAmelCase = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
lowercase , lowercase , lowercase , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowercase , offload_state_dict=lowercase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(lowercase , device_map=lowercase , offload_dir=lowercase )
def _lowerCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None ) -> Dict:
if device_map is None:
if torch.cuda.is_available():
__lowerCAmelCase = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(lowercase , lowercase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
__lowerCAmelCase = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
__lowerCAmelCase = {}
__lowerCAmelCase = special_dtypes
__lowerCAmelCase = no_split_module_classes
__lowerCAmelCase = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
__lowerCAmelCase = get_balanced_memory(
lowercase , low_zero=(device_map == """balanced_low_0""") , max_memory=lowercase , **lowercase , )
__lowerCAmelCase = max_memory
__lowerCAmelCase = infer_auto_device_map(lowercase , **lowercase )
if isinstance(lowercase , lowercase ):
# check if don't have any quantized module on the cpu
__lowerCAmelCase = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
__lowerCAmelCase = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def _lowerCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None ) -> Union[str, Any]:
if modules_to_not_convert is None:
__lowerCAmelCase = []
__lowerCAmelCase , __lowerCAmelCase = _replace_with_bnb_layers(
lowercase , lowercase , lowercase , lowercase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _lowerCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None , ) -> Tuple:
__lowerCAmelCase = False
for name, module in model.named_children():
if current_key_name is None:
__lowerCAmelCase = []
current_key_name.append(lowercase )
if isinstance(lowercase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
__lowerCAmelCase = """.""".join(lowercase )
__lowerCAmelCase = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
__lowerCAmelCase = False
break
if proceed:
                # Load bnb module with empty weight and replace `nn.Linear` module
if bnb_quantization_config.load_in_abit:
__lowerCAmelCase = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=lowercase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
__lowerCAmelCase = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
__lowerCAmelCase = module.weight.data
if module.bias is not None:
__lowerCAmelCase = module.bias.data
bnb_module.requires_grad_(lowercase )
setattr(lowercase , lowercase , lowercase )
__lowerCAmelCase = True
if len(list(module.children() ) ) > 0:
__lowerCAmelCase , __lowerCAmelCase = _replace_with_bnb_layers(
lowercase , lowercase , lowercase , lowercase )
__lowerCAmelCase = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _lowerCAmelCase ( lowercase ) -> str:
# Create a copy of the model
with init_empty_weights():
        __lowerCAmelCase = deepcopy(lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager
__lowerCAmelCase = find_tied_parameters(lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(lowercase , lowercase ):
__lowerCAmelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
__lowerCAmelCase = sum(lowercase , [] )
__lowerCAmelCase = len(lowercase ) > 0
# Check if it is a base model
__lowerCAmelCase = False
if hasattr(lowercase , """base_model_prefix""" ):
__lowerCAmelCase = not hasattr(lowercase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
__lowerCAmelCase = list(model.named_children() )
__lowerCAmelCase = [list_modules[-1][0]]
# add last module together with tied weights
__lowerCAmelCase = set(lowercase ) - set(lowercase )
__lowerCAmelCase = list(set(lowercase ) ) + list(lowercase )
# remove ".weight" from the keys
__lowerCAmelCase = [""".weight""", """.bias"""]
__lowerCAmelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
__lowerCAmelCase = name.replace(lowercase , """""" )
filtered_module_names.append(lowercase )
return filtered_module_names
def _lowerCAmelCase ( lowercase ) -> List[Any]:
for m in model.modules():
if isinstance(lowercase , bnb.nn.Linearabit ):
return True
return False
def _lowerCAmelCase ( lowercase ) -> int:
return next(parameter.parameters() ).device
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(lowercase , lowercase , 0 , dtype=lowercase , value=lowercase )
__lowerCAmelCase = param_name
__lowerCAmelCase = model
if "." in tensor_name:
__lowerCAmelCase = tensor_name.split(""".""" )
for split in splits[:-1]:
__lowerCAmelCase = getattr(lowercase , lowercase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
__lowerCAmelCase = new_module
__lowerCAmelCase = splits[-1]
# offload weights
__lowerCAmelCase = False
offload_weight(module._parameters[tensor_name] , lowercase , lowercase , index=lowercase )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , lowercase , index=lowercase , )
else:
offload_weight(lowercase , lowercase , lowercase , index=lowercase )
offload_weight(lowercase , param_name.replace("""weight""" , """SCB""" ) , lowercase , index=lowercase )
set_module_tensor_to_device(lowercase , lowercase , """meta""" , dtype=lowercase , value=torch.empty(*param.size() ) )
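def _example_should_convert_module(current_key_name_str, modules_to_not_convert):
    # Hypothetical helper, not part of the original utilities: it restates the skip check
    # used by the recursive bnb replacement above. A linear layer is left unconverted when
    # an entry of `modules_to_not_convert` equals its dotted name or appears in it as a
    # non-final dotted component, e.g. _example_should_convert_module("lm_head", ["lm_head"])
    # returns False.
    for key in modules_to_not_convert:
        if ((key in current_key_name_str) and (key + "." in current_key_name_str)) or key == current_key_name_str:
            return False
    return True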
| 689 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
_a : Optional[int] = logging.get_logger(__name__)
_a : int = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_a : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCAmelCase :
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """Model type selected in the list: """ + """, """.join(lowerCAmelCase_ )} )
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
a : int =field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a : int =field(
default=1_28 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
a : int =field(
default=64 , metadata={
"""help""": (
"""The maximum number of tokens for the question. Questions longer than this will """
"""be truncated to this length."""
)
} , )
a : int =field(
default=30 , metadata={
"""help""": (
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
)
} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
a : float =field(
default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a : int =field(
default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a : int =field(
default=0 , metadata={
"""help""": (
"""language id of input for language-specific xlm models (see"""
""" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
)
} , )
a : int =field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Optional[Any] ="""train"""
a : Optional[int] ="""dev"""
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : SquadDataTrainingArguments
a : List[SquadFeatures]
a : Split
a : bool
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = Split.train,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = "pt",):
'''simple docstring'''
__lowerCAmelCase = args
__lowerCAmelCase = is_language_sensitive
__lowerCAmelCase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
try:
__lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
__lowerCAmelCase = mode
# Load data features from cache or dataset file
__lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1"""
__lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCAmelCase = cached_features_file + """.lock"""
with FileLock(__SCREAMING_SNAKE_CASE ):
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache:
__lowerCAmelCase = time.time()
__lowerCAmelCase = torch.load(__SCREAMING_SNAKE_CASE )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__lowerCAmelCase = self.old_features["""features"""]
__lowerCAmelCase = self.old_features.get("""dataset""",__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.old_features.get("""examples""",__SCREAMING_SNAKE_CASE )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
""" future run""" )
else:
if mode == Split.dev:
__lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
else:
__lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
__lowerCAmelCase , __lowerCAmelCase = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__SCREAMING_SNAKE_CASE,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples},__SCREAMING_SNAKE_CASE,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.features[i]
__lowerCAmelCase = torch.tensor(feature.input_ids,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.attention_mask,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.token_type_ids,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.cls_index,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.p_mask,dtype=torch.float )
__lowerCAmelCase = torch.tensor(feature.is_impossible,dtype=torch.float )
__lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
__lowerCAmelCase = torch.tensor(feature.start_position,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
| 689 | 1 |
'''simple docstring'''
_a : List[Any] = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 689 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
# vision encoder
if "img_encoder.pos_embed" in name:
__lowerCAmelCase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
__lowerCAmelCase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
__lowerCAmelCase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
__lowerCAmelCase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
__lowerCAmelCase = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
__lowerCAmelCase = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
__lowerCAmelCase = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
__lowerCAmelCase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
__lowerCAmelCase = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
__lowerCAmelCase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
__lowerCAmelCase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
__lowerCAmelCase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
__lowerCAmelCase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
__lowerCAmelCase = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
__lowerCAmelCase = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
__lowerCAmelCase = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
__lowerCAmelCase = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
__lowerCAmelCase = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
__lowerCAmelCase = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
__lowerCAmelCase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
__lowerCAmelCase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
__lowerCAmelCase = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
__lowerCAmelCase = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> Dict:
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(lowercase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase , __lowerCAmelCase = int(key_split[2] ), int(key_split[4] )
__lowerCAmelCase = config.vision_config.hidden_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[dim : dim * 2]
__lowerCAmelCase = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase = int(key_split[3] )
__lowerCAmelCase = config.text_config.hidden_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[
dim : dim * 2, :
]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[dim : dim * 2]
__lowerCAmelCase = val[-dim:]
else:
__lowerCAmelCase = rename_key(lowercase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
__lowerCAmelCase = val.squeeze_()
else:
__lowerCAmelCase = val
return orig_state_dict
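def _example_split_fused_qkv(fused_qkv, dim):
    # Hypothetical helper, not part of the original conversion script: it spells out the
    # qkv split performed above. `fused_qkv` is assumed to be a (3 * dim, dim) weight
    # matrix whose rows [0:dim] hold the query projection, rows [dim:2*dim] the key
    # projection and the last `dim` rows the value projection.
    return fused_qkv[:dim, :], fused_qkv[dim : dim * 2, :], fused_qkv[-dim:, :]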
def _lowerCAmelCase ( ) -> str:
__lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCAmelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase , lowercase="groupvit-gcc-yfcc" , lowercase=False ) -> List[Any]:
__lowerCAmelCase = GroupViTConfig()
__lowerCAmelCase = GroupViTModel(lowercase ).eval()
__lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )["""model"""]
__lowerCAmelCase = convert_state_dict(lowercase , lowercase )
__lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowercase , strict=lowercase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase ) == 0)
# verify result
__lowerCAmelCase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowercase , padding=lowercase , return_tensors="""pt""" )
with torch.no_grad():
__lowerCAmelCase = model(**lowercase )
if model_name == "groupvit-gcc-yfcc":
__lowerCAmelCase = torch.tensor([[13.35_23, 6.36_29]] )
elif model_name == "groupvit-gcc-redcaps":
__lowerCAmelCase = torch.tensor([[16.18_73, 8.62_30]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , lowercase , atol=1e-3 )
processor.save_pretrained(lowercase )
model.save_pretrained(lowercase )
print("""Successfully saved processor and model to""" , lowercase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowercase , organization="""nielsr""" )
model.push_to_hub(lowercase , organization="""nielsr""" )
if __name__ == "__main__":
_a : int = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_a : List[str] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 689 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : int ="""naver-clova-ix/donut-base-finetuned-docvqa"""
a : Any =(
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
a : Tuple ="""document_qa"""
a : List[Any] =AutoProcessor
a : int =VisionEncoderDecoderModel
a : Optional[Any] =["""image""", """text"""]
a : int =["""text"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
__lowerCAmelCase = task_prompt.replace("""{user_input}""",__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.pre_processor.tokenizer(
__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" ).input_ids
__lowerCAmelCase = self.pre_processor(__SCREAMING_SNAKE_CASE,return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.model.generate(
inputs["""pixel_values"""].to(self.device ),decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ),max_length=self.model.decoder.config.max_position_embeddings,early_stopping=__SCREAMING_SNAKE_CASE,pad_token_id=self.pre_processor.tokenizer.pad_token_id,eos_token_id=self.pre_processor.tokenizer.eos_token_id,use_cache=__SCREAMING_SNAKE_CASE,num_beams=1,bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],return_dict_in_generate=__SCREAMING_SNAKE_CASE,).sequences
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.pre_processor.batch_decode(__SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase = sequence.replace(self.pre_processor.tokenizer.eos_token,"""""" )
__lowerCAmelCase = sequence.replace(self.pre_processor.tokenizer.pad_token,"""""" )
__lowerCAmelCase = re.sub(R"""<.*?>""","""""",__SCREAMING_SNAKE_CASE,count=1 ).strip() # remove first task start token
__lowerCAmelCase = self.pre_processor.tokenajson(__SCREAMING_SNAKE_CASE )
return sequence["answer"]
| 689 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_a : Tuple = logging.get_logger(__name__)
_a : Optional[int] = ["""model.decoder.embed_positions.weights"""]
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
if "emb" in name:
__lowerCAmelCase = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
__lowerCAmelCase = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
__lowerCAmelCase = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
__lowerCAmelCase = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
__lowerCAmelCase = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
__lowerCAmelCase = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
__lowerCAmelCase = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
__lowerCAmelCase = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
__lowerCAmelCase = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
__lowerCAmelCase = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple[Dict, Dict]:
__lowerCAmelCase = list(state_dict.keys() )
__lowerCAmelCase = {}
for key in keys:
__lowerCAmelCase = state_dict.pop(lowercase )
__lowerCAmelCase = rename_keys(lowercase )
if "in_proj_weight" in key:
# split fused qkv proj
__lowerCAmelCase = val[:hidden_size, :]
__lowerCAmelCase = val[hidden_size : 2 * hidden_size, :]
__lowerCAmelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__lowerCAmelCase = val
else:
__lowerCAmelCase = val
return state_dict, enc_dec_proj_state_dict
def _lowerCAmelCase ( lowercase ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
__lowerCAmelCase = 1024
__lowerCAmelCase = 24
__lowerCAmelCase = 16
elif checkpoint == "medium":
__lowerCAmelCase = 1536
__lowerCAmelCase = 48
__lowerCAmelCase = 24
elif checkpoint == "large":
__lowerCAmelCase = 2048
__lowerCAmelCase = 48
__lowerCAmelCase = 32
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
__lowerCAmelCase = MusicgenDecoderConfig(
hidden_size=lowercase , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase , num_attention_heads=lowercase , )
return config
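# Quick reference (added comment, not part of the original script) for the decoder sizes
# chosen above:
#   small  -> hidden_size=1024, 24 layers, 16 attention heads
#   medium -> hidden_size=1536, 48 layers, 24 attention heads
#   large  -> hidden_size=2048, 48 layers, 32 attention heads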
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase=None , lowercase=None , lowercase="cpu" ) -> Optional[Any]:
__lowerCAmelCase = MusicGen.get_pretrained(lowercase , device=lowercase )
__lowerCAmelCase = decoder_config_from_checkpoint(lowercase )
__lowerCAmelCase = fairseq_model.lm.state_dict()
__lowerCAmelCase , __lowerCAmelCase = rename_state_dict(
lowercase , hidden_size=decoder_config.hidden_size )
__lowerCAmelCase = TaEncoderModel.from_pretrained("""t5-base""" )
__lowerCAmelCase = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
__lowerCAmelCase = MusicgenForCausalLM(lowercase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__lowerCAmelCase , __lowerCAmelCase = decoder.load_state_dict(lowercase , strict=lowercase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase )
if len(lowercase ) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
if len(lowercase ) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
__lowerCAmelCase = MusicgenForConditionalGeneration(text_encoder=lowercase , audio_encoder=lowercase , decoder=lowercase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase )
# check we can do a forward pass
__lowerCAmelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__lowerCAmelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__lowerCAmelCase = model(input_ids=lowercase , decoder_input_ids=lowercase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
__lowerCAmelCase = AutoTokenizer.from_pretrained("""t5-base""" )
__lowerCAmelCase = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
__lowerCAmelCase = MusicgenProcessor(feature_extractor=lowercase , tokenizer=lowercase )
# set the appropriate bos/pad token ids
__lowerCAmelCase = 2048
__lowerCAmelCase = 2048
# set other default generation config params
__lowerCAmelCase = int(30 * audio_encoder.config.frame_rate )
__lowerCAmelCase = True
__lowerCAmelCase = 3.0
if pytorch_dump_folder is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(lowercase )
processor.save_pretrained(lowercase )
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(lowercase )
processor.push_to_hub(lowercase )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
_a : List[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 689 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = parent
__lowerCAmelCase = 13
__lowerCAmelCase = 7
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = 2
__lowerCAmelCase = 99
__lowerCAmelCase = 0
__lowerCAmelCase = 32
__lowerCAmelCase = 2
__lowerCAmelCase = 4
__lowerCAmelCase = 0.1
__lowerCAmelCase = 0.1
__lowerCAmelCase = 5_12
__lowerCAmelCase = 16
__lowerCAmelCase = 2
__lowerCAmelCase = 0.02
__lowerCAmelCase = 3
__lowerCAmelCase = 4
__lowerCAmelCase = """last"""
__lowerCAmelCase = True
__lowerCAmelCase = None
__lowerCAmelCase = 0
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length],dtype=tf.floataa )
__lowerCAmelCase = None
if self.use_input_lengths:
__lowerCAmelCase = (
ids_tensor([self.batch_size],vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length],self.n_langs )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size],self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size],2,dtype=tf.floataa )
__lowerCAmelCase = ids_tensor([self.batch_size],self.num_choices )
__lowerCAmelCase = FlaubertConfig(
vocab_size=self.vocab_size,n_special=self.n_special,emb_dim=self.hidden_size,n_layers=self.num_hidden_layers,n_heads=self.num_attention_heads,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,gelu_activation=self.gelu_activation,sinusoidal_embeddings=self.sinusoidal_embeddings,asm=self.asm,causal=self.causal,n_langs=self.n_langs,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,summary_type=self.summary_type,use_proj=self.use_proj,bos_token_id=self.bos_token_id,)
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = TFFlaubertModel(config=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [input_ids, input_mask]
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = TFFlaubertWithLMHeadModel(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = TFFlaubertForQuestionAnsweringSimple(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = TFFlaubertForSequenceClassification(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = TFFlaubertForTokenClassification(config=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = self.num_choices
__lowerCAmelCase = TFFlaubertForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ),(1, self.num_choices, 1) )
__lowerCAmelCase = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ),(1, self.num_choices, 1) )
__lowerCAmelCase = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ),(1, self.num_choices, 1) )
__lowerCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.prepare_config_and_inputs()
        (
            __lowerCAmelCase,
            __lowerCAmelCase,
            __lowerCAmelCase,
            __lowerCAmelCase,
            __lowerCAmelCase,
            __lowerCAmelCase,
            __lowerCAmelCase,
            __lowerCAmelCase,
            __lowerCAmelCase,
        ) = config_and_inputs
__lowerCAmelCase = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
a : List[Any] =(
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
a : Union[str, Any] =(
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
a : Optional[int] =(
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
a : Dict =False
a : str =False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = TFFlaubertModelTester(self )
__lowerCAmelCase = ConfigTester(self,config_class=__SCREAMING_SNAKE_CASE,emb_dim=37 )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TFFlaubertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
__lowerCAmelCase = tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]],dtype=tf.intaa,) # "J'aime flaubert !"
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase = tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape,__SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
__lowerCAmelCase = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
],dtype=tf.floataa,)
self.assertTrue(np.allclose(output[:, :3, :3].numpy(),expected_slice.numpy(),atol=1e-4 ) )
| 689 |
'''simple docstring'''
from collections import deque
def _lowerCAmelCase ( lowercase ) -> Dict:
__lowerCAmelCase = len(lowercase )
__lowerCAmelCase = deque()
__lowerCAmelCase = [False for _ in range(lowercase )]
__lowerCAmelCase = [-1 for _ in range(lowercase )]
__lowerCAmelCase = index_of[:]
def strong_connect(lowercase , lowercase , lowercase ):
__lowerCAmelCase = index # the number when this node is seen
__lowerCAmelCase = index # lowest rank node reachable from here
index += 1
stack.append(lowercase )
__lowerCAmelCase = True
for w in g[v]:
if index_of[w] == -1:
__lowerCAmelCase = strong_connect(lowercase , lowercase , lowercase )
__lowerCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__lowerCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
__lowerCAmelCase = []
__lowerCAmelCase = stack.pop()
__lowerCAmelCase = False
component.append(lowercase )
while w != v:
__lowerCAmelCase = stack.pop()
__lowerCAmelCase = False
component.append(lowercase )
components.append(lowercase )
return index
__lowerCAmelCase = []
for v in range(lowercase ):
if index_of[v] == -1:
strong_connect(lowercase , 0 , lowercase )
return components
def _lowerCAmelCase ( lowercase , lowercase ) -> str:
__lowerCAmelCase = [[] for _ in range(lowercase )]
for u, v in edges:
g[u].append(lowercase )
return g
if __name__ == "__main__":
# Test
_a : Any = 7
_a : Tuple = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_a : Optional[int] = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_a : Optional[Any] = [(u, v) for u, v in zip(source, target)]
_a : Optional[int] = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
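    # Hypothetical extra check, not in the original test, reusing the `tarjan` and
    # `create_graph` names exactly as the test above does: a directed 3-cycle collapses
    # into a single strongly connected component.
    assert [[2, 1, 0]] == tarjan(create_graph(3, [(0, 1), (1, 2), (2, 0)]))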
| 689 | 1 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def _lowerCAmelCase ( lowercase ) -> int:
if hor == 128:
__lowerCAmelCase = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
__lowerCAmelCase = (32, 128, 256)
__lowerCAmelCase = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
__lowerCAmelCase = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
__lowerCAmelCase = (32, 64, 128, 256)
__lowerCAmelCase = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
__lowerCAmelCase = torch.load(f'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' )
__lowerCAmelCase = model.state_dict()
__lowerCAmelCase = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_5536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
__lowerCAmelCase = UNetaDModel(**lowercase )
print(f'length of state dict: {len(state_dict.keys() )}' )
print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
__lowerCAmelCase = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
__lowerCAmelCase = state_dict.pop(lowercase )
hf_value_function.load_state_dict(lowercase )
torch.save(hf_value_function.state_dict() , f'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' )
with open(f'hub/hopper-medium-v2/unet/hor{hor}/config.json' , """w""" ) as f:
json.dump(lowercase , lowercase )
def _lowerCAmelCase ( ) -> Tuple:
__lowerCAmelCase = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_5536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
__lowerCAmelCase = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
__lowerCAmelCase = model
__lowerCAmelCase = UNetaDModel(**lowercase )
print(f'length of state dict: {len(state_dict.keys() )}' )
print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
__lowerCAmelCase = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
__lowerCAmelCase = state_dict.pop(lowercase )
hf_value_function.load_state_dict(lowercase )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(lowercase , lowercase )
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
| 689 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ) -> Union[str, Any]:
__lowerCAmelCase = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
__lowerCAmelCase = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(lowercase )
# Let's go
__lowerCAmelCase = parser.parse_args()
if not hasattr(lowercase , """func""" ):
parser.print_help()
exit(1 )
# Run
__lowerCAmelCase = args.func(lowercase )
service.run()
if __name__ == "__main__":
main()
| 689 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase ) -> bool:
if not isinstance(lowercase , lowercase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
__lowerCAmelCase = str(lowercase )
__lowerCAmelCase = """""".join(sorted(lowercase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
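def _example_is_bouncy(n: int) -> bool:
    # Hypothetical restatement of the bouncy check above, kept for illustration only: a
    # number is bouncy when its digits are neither in ascending nor in descending order,
    # e.g. 155349 is bouncy while 66420 (digits descending) is not.
    digits = str(n)
    ascending = "".join(sorted(digits))
    return ascending != digits and ascending[::-1] != digits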
def _lowerCAmelCase ( lowercase = 99 ) -> int:
if not 0 < percent < 100:
raise ValueError("""solution() only accepts values from 0 to 100""" )
__lowerCAmelCase = 0
__lowerCAmelCase = 1
while True:
if check_bouncy(lowercase ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'{solution(9_9)}')
| 689 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)
_a : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
_a : Any = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
for attribute in key.split(""".""" ):
__lowerCAmelCase = getattr(lowercase , lowercase )
if weight_type is not None:
__lowerCAmelCase = getattr(lowercase , lowercase ).shape
else:
__lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
__lowerCAmelCase = value
elif weight_type == "weight_g":
__lowerCAmelCase = value
elif weight_type == "weight_v":
__lowerCAmelCase = value
elif weight_type == "bias":
__lowerCAmelCase = value
else:
__lowerCAmelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]:
__lowerCAmelCase = []
__lowerCAmelCase = fairseq_model.state_dict()
__lowerCAmelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCAmelCase = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key):
# special case since naming is very similar
continue
__lowerCAmelCase = True
if "*" in mapped_key:
__lowerCAmelCase = name.split(lowercase )[0].split(""".""" )[-2]
__lowerCAmelCase = mapped_key.replace("""*""" , lowercase )
if "weight_g" in name:
__lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
__lowerCAmelCase = """weight_v"""
elif "bias" in name:
__lowerCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase = """weight"""
else:
__lowerCAmelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'Unused weights: {unused_weights}' )
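def _example_expand_wildcard(mapped_key, fairseq_name, key):
    # Hypothetical helper, not part of the original script: it illustrates how the "*"
    # placeholder in MAPPING is resolved above. The layer index is taken from the text
    # preceding the matched key in the fairseq parameter name, e.g.
    # ("encoder.layers.*.attention.k_proj", "encoder.layers.3.self_attn.k_proj.weight",
    #  "self_attn.k_proj") maps to "encoder.layers.3.attention.k_proj".
    layer_index = fairseq_name.split(key)[0].split(".")[-2]
    return mapped_key.replace("*", layer_index)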
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
__lowerCAmelCase = name.split(""".""" )
__lowerCAmelCase = int(items[0] )
__lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            __lowerCAmelCase = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Dict:
if config_path is not None:
__lowerCAmelCase = UniSpeechSatConfig.from_pretrained(lowercase )
else:
__lowerCAmelCase = UniSpeechSatConfig()
__lowerCAmelCase = """"""
if is_finetuned:
__lowerCAmelCase = UniSpeechSatForCTC(lowercase )
else:
__lowerCAmelCase = UniSpeechSatForPreTraining(lowercase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
__lowerCAmelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_a : List[str] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_a : Union[str, Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
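# Example invocation (a sketch; the script name and paths below are assumptions, not taken from the source):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf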
| 689 | 1 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_a : Dict = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_a : Dict = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def _lowerCAmelCase ( lowercase ) -> str:
__lowerCAmelCase = numpy.dtype(numpy.uintaa ).newbyteorder(""">""" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=lowercase )[0]
@deprecated(lowercase , """Please use tf.data to implement this functionality.""" )
def _lowerCAmelCase ( lowercase ) -> Any:
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=lowercase ) as bytestream:
__lowerCAmelCase = _readaa(lowercase )
if magic != 2051:
raise ValueError(
"""Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) )
__lowerCAmelCase = _readaa(lowercase )
__lowerCAmelCase = _readaa(lowercase )
__lowerCAmelCase = _readaa(lowercase )
__lowerCAmelCase = bytestream.read(rows * cols * num_images )
__lowerCAmelCase = numpy.frombuffer(lowercase , dtype=numpy.uinta )
__lowerCAmelCase = data.reshape(lowercase , lowercase , lowercase , 1 )
return data
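# Note: 2051 (0x00000803) is the magic number of the IDX3 image files in the MNIST
# distribution; the label reader further below expects 2049 (0x00000801) for IDX1 label files.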
@deprecated(lowercase , """Please use tf.one_hot on tensors.""" )
def _lowerCAmelCase ( lowercase , lowercase ) -> List[str]:
__lowerCAmelCase = labels_dense.shape[0]
__lowerCAmelCase = numpy.arange(lowercase ) * num_classes
__lowerCAmelCase = numpy.zeros((num_labels, num_classes) )
__lowerCAmelCase = 1
return labels_one_hot
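# For example, with num_classes=3 the dense labels [0, 2] are intended to map to the
# one-hot rows [[1, 0, 0], [0, 0, 1]] (a sketch of the intended behaviour of the helper above).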
@deprecated(lowercase , """Please use tf.data to implement this functionality.""" )
def _lowerCAmelCase ( lowercase , lowercase=False , lowercase=10 ) -> int:
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=lowercase ) as bytestream:
__lowerCAmelCase = _readaa(lowercase )
if magic != 2049:
raise ValueError(
"""Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
__lowerCAmelCase = _readaa(lowercase )
__lowerCAmelCase = bytestream.read(lowercase )
__lowerCAmelCase = numpy.frombuffer(lowercase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(lowercase , lowercase )
return labels
class _UpperCAmelCase :
@deprecated(
__SCREAMING_SNAKE_CASE,"""Please use alternatives such as official/mnist/_DataSet.py"""
""" from tensorflow/models.""",)
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=dtypes.floataa,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=None,):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = random_seed.get_seed(__SCREAMING_SNAKE_CASE )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
__lowerCAmelCase = dtypes.as_dtype(__SCREAMING_SNAKE_CASE ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype )
if fake_data:
__lowerCAmelCase = 1_00_00
__lowerCAmelCase = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'images.shape: {images.shape} labels.shape: {labels.shape}'
__lowerCAmelCase = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__lowerCAmelCase = images.reshape(
images.shape[0],images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
__lowerCAmelCase = images.astype(numpy.floataa )
__lowerCAmelCase = numpy.multiply(__SCREAMING_SNAKE_CASE,1.0 / 255.0 )
__lowerCAmelCase = images
__lowerCAmelCase = labels
__lowerCAmelCase = 0
__lowerCAmelCase = 0
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._images
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._labels
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._num_examples
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._epochs_completed
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
if fake_data:
__lowerCAmelCase = [1] * 7_84
__lowerCAmelCase = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__SCREAMING_SNAKE_CASE )],
[fake_label for _ in range(__SCREAMING_SNAKE_CASE )],
)
__lowerCAmelCase = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__lowerCAmelCase = numpy.arange(self._num_examples )
numpy.random.shuffle(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.images[perma]
__lowerCAmelCase = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
            # Get the remaining examples in this epoch
__lowerCAmelCase = self._num_examples - start
__lowerCAmelCase = self._images[start : self._num_examples]
__lowerCAmelCase = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__lowerCAmelCase = numpy.arange(self._num_examples )
numpy.random.shuffle(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.images[perm]
__lowerCAmelCase = self.labels[perm]
# Start next epoch
__lowerCAmelCase = 0
__lowerCAmelCase = batch_size - rest_num_examples
__lowerCAmelCase = self._index_in_epoch
__lowerCAmelCase = self._images[start:end]
__lowerCAmelCase = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part),axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part),axis=0 ),
)
else:
self._index_in_epoch += batch_size
__lowerCAmelCase = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(lowercase , """Please write your own downloading logic.""" )
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> Optional[int]:
if not gfile.Exists(lowercase ):
gfile.MakeDirs(lowercase )
__lowerCAmelCase = os.path.join(lowercase , lowercase )
if not gfile.Exists(lowercase ):
urllib.request.urlretrieve(lowercase , lowercase ) # noqa: S310
with gfile.GFile(lowercase ) as f:
__lowerCAmelCase = f.size()
print("""Successfully downloaded""" , lowercase , lowercase , """bytes.""" )
return filepath
@deprecated(
lowercase , """Please use alternatives such as:""" """ tensorflow_datasets.load('mnist')""" )
def _lowerCAmelCase ( lowercase , lowercase=False , lowercase=False , lowercase=dtypes.floataa , lowercase=True , lowercase=5000 , lowercase=None , lowercase=DEFAULT_SOURCE_URL , ) -> int:
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=lowercase , one_hot=lowercase , dtype=lowercase , seed=lowercase )
__lowerCAmelCase = fake()
__lowerCAmelCase = fake()
__lowerCAmelCase = fake()
return _Datasets(train=lowercase , validation=lowercase , test=lowercase )
if not source_url: # empty string check
__lowerCAmelCase = DEFAULT_SOURCE_URL
__lowerCAmelCase = """train-images-idx3-ubyte.gz"""
__lowerCAmelCase = """train-labels-idx1-ubyte.gz"""
__lowerCAmelCase = """t10k-images-idx3-ubyte.gz"""
__lowerCAmelCase = """t10k-labels-idx1-ubyte.gz"""
__lowerCAmelCase = _maybe_download(
lowercase , lowercase , source_url + train_images_file )
with gfile.Open(lowercase , """rb""" ) as f:
__lowerCAmelCase = _extract_images(lowercase )
__lowerCAmelCase = _maybe_download(
lowercase , lowercase , source_url + train_labels_file )
with gfile.Open(lowercase , """rb""" ) as f:
__lowerCAmelCase = _extract_labels(lowercase , one_hot=lowercase )
__lowerCAmelCase = _maybe_download(
lowercase , lowercase , source_url + test_images_file )
with gfile.Open(lowercase , """rb""" ) as f:
__lowerCAmelCase = _extract_images(lowercase )
__lowerCAmelCase = _maybe_download(
lowercase , lowercase , source_url + test_labels_file )
with gfile.Open(lowercase , """rb""" ) as f:
__lowerCAmelCase = _extract_labels(lowercase , one_hot=lowercase )
if not 0 <= validation_size <= len(lowercase ):
__lowerCAmelCase = (
"""Validation size should be between 0 and """
f'{len(lowercase )}. Received: {validation_size}.'
)
raise ValueError(lowercase )
__lowerCAmelCase = train_images[:validation_size]
__lowerCAmelCase = train_labels[:validation_size]
__lowerCAmelCase = train_images[validation_size:]
__lowerCAmelCase = train_labels[validation_size:]
__lowerCAmelCase = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed}
__lowerCAmelCase = _DataSet(lowercase , lowercase , **lowercase )
__lowerCAmelCase = _DataSet(lowercase , lowercase , **lowercase )
__lowerCAmelCase = _DataSet(lowercase , lowercase , **lowercase )
return _Datasets(train=lowercase , validation=lowercase , test=lowercase )
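# Usage sketch (assuming the loader above is exposed as `read_data_sets`, which is an assumption):
#   datasets = read_data_sets("/tmp/mnist_data", one_hot=True)
#   batch_images, batch_labels = datasets.train.next_batch(128)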
| 689 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_a : str = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_a : Dict = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_a : List[str] = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ),reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = spearmanr(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 689 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : List[Any] =CTRLTokenizer
a : Tuple =False
a : List[str] =False
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
__lowerCAmelCase = dict(zip(__SCREAMING_SNAKE_CASE,range(len(__SCREAMING_SNAKE_CASE ) ) ) )
__lowerCAmelCase = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
__lowerCAmelCase = {"""unk_token""": """<unk>"""}
__lowerCAmelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCAmelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file,"""w""",encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file,"""w""",encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def lowerCamelCase__ ( self,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = """adapt react readapt apt"""
__lowerCAmelCase = """adapt react readapt apt"""
return input_text, output_text
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = CTRLTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
__lowerCAmelCase = """adapt react readapt apt"""
__lowerCAmelCase = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
__lowerCAmelCase = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokens + [tokenizer.unk_token]
__lowerCAmelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
| 689 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ):
a : List[str] =["""onnx"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""onnx"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""onnx"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""onnx"""] )
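# These dummy objects let the import itself succeed; any attempt to instantiate the class or to
# call its loading classmethods (presumably `from_config` / `from_pretrained`) raises an
# informative error asking for the `onnx` backend.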
| 689 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ):
a : List[str] =["""onnx"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""onnx"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""onnx"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""onnx"""] )
| 689 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : Optional[int] = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[str] ="""gptj"""
a : Optional[int] ={
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self,__SCREAMING_SNAKE_CASE=5_04_00,__SCREAMING_SNAKE_CASE=20_48,__SCREAMING_SNAKE_CASE=40_96,__SCREAMING_SNAKE_CASE=28,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="gelu_new",__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=False,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = vocab_size
__lowerCAmelCase = n_positions
__lowerCAmelCase = n_embd
__lowerCAmelCase = n_layer
__lowerCAmelCase = n_head
__lowerCAmelCase = n_inner
__lowerCAmelCase = rotary_dim
__lowerCAmelCase = activation_function
__lowerCAmelCase = resid_pdrop
__lowerCAmelCase = embd_pdrop
__lowerCAmelCase = attn_pdrop
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_range
__lowerCAmelCase = use_cache
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
super().__init__(
bos_token_id=__SCREAMING_SNAKE_CASE,eos_token_id=__SCREAMING_SNAKE_CASE,tie_word_embeddings=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
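# Example (a sketch; `GPTJConfig` is the assumed public name of the configuration class above):
#   config = GPTJConfig(n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64)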
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = "default",__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE,task=__SCREAMING_SNAKE_CASE,patching_specs=__SCREAMING_SNAKE_CASE,use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config,"""pad_token_id""",__SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
__lowerCAmelCase = 0
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE,direction="""inputs""" )
__lowerCAmelCase = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._config.n_layer
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._config.n_head
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,):
'''simple docstring'''
__lowerCAmelCase = super(__SCREAMING_SNAKE_CASE,self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE,batch_size=__SCREAMING_SNAKE_CASE,seq_length=__SCREAMING_SNAKE_CASE,is_pair=__SCREAMING_SNAKE_CASE,framework=__SCREAMING_SNAKE_CASE )
        # We need to order the inputs in the way they appear in the forward()
__lowerCAmelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCAmelCase = seqlen + 2
__lowerCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowerCAmelCase = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
__lowerCAmelCase = common_inputs["""attention_mask"""]
if self.use_past:
__lowerCAmelCase = ordered_inputs["""attention_mask"""].dtype
__lowerCAmelCase = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,dtype=__SCREAMING_SNAKE_CASE )],dim=1 )
return ordered_inputs
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 13
| 689 | 1 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_a : str = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_a : int = get_tests_dir("""fixtures/vocab.json""")
_a : Optional[Any] = get_tests_dir("""fixtures""")
class _UpperCAmelCase ( unittest.TestCase ):
a : Optional[Any] =["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = 0
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = WavaVecaConfig()
__lowerCAmelCase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(__SCREAMING_SNAKE_CASE )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = AutoProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__SCREAMING_SNAKE_CASE,os.path.join(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
copyfile(__SCREAMING_SNAKE_CASE,os.path.join(__SCREAMING_SNAKE_CASE,"""vocab.json""" ) )
__lowerCAmelCase = AutoProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = WavaVecaFeatureExtractor()
__lowerCAmelCase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
__lowerCAmelCase = WavaVecaProcessor(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
# drop `processor_class` in tokenizer
with open(os.path.join(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ),"""r""" ) as f:
__lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE )
config_dict.pop("""processor_class""" )
with open(os.path.join(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ),"""w""" ) as f:
f.write(json.dumps(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = AutoProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = WavaVecaFeatureExtractor()
__lowerCAmelCase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
__lowerCAmelCase = WavaVecaProcessor(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
# drop `processor_class` in feature extractor
with open(os.path.join(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ),"""r""" ) as f:
__lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE )
config_dict.pop("""processor_class""" )
with open(os.path.join(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ),"""w""" ) as f:
f.write(json.dumps(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = AutoProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(__SCREAMING_SNAKE_CASE )
# copy relevant files
copyfile(__SCREAMING_SNAKE_CASE,os.path.join(__SCREAMING_SNAKE_CASE,"""vocab.json""" ) )
            # create empty sample processor
with open(os.path.join(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ),"""w""" ) as f:
f.write("""{}""" )
__lowerCAmelCase = AutoProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""",trust_remote_code=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""",trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__,"""NewProcessor""" )
__lowerCAmelCase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__,"""NewFeatureExtractor""" )
__lowerCAmelCase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__,"""NewTokenizerFast""" )
# Test we can also load the slow version
__lowerCAmelCase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""",trust_remote_code=__SCREAMING_SNAKE_CASE,use_fast=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__,"""NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__,"""NewTokenizer""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
try:
AutoConfig.register("""custom""",__SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
AutoTokenizer.register(__SCREAMING_SNAKE_CASE,slow_tokenizer_class=__SCREAMING_SNAKE_CASE )
AutoProcessor.register(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
AutoProcessor.register(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowerCAmelCase = CustomFeatureExtractor.from_pretrained(__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE,"""vocab.txt""" )
with open(__SCREAMING_SNAKE_CASE,"""w""",encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
__lowerCAmelCase = CustomTokenizer(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = CustomProcessor(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = AutoProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self ):
'''simple docstring'''
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Any =False
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Any =False
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[str] ="""AutoFeatureExtractor"""
a : Optional[int] ="""AutoTokenizer"""
a : List[str] =False
try:
AutoConfig.register("""custom""",__SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
AutoTokenizer.register(__SCREAMING_SNAKE_CASE,slow_tokenizer_class=__SCREAMING_SNAKE_CASE )
AutoProcessor.register(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local classes.
__lowerCAmelCase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__,"""NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
__lowerCAmelCase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""",trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__,"""NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
__lowerCAmelCase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""",trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__,"""NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__,"""BertTokenizerFast""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__,"""ConvNextImageProcessor""" )
@is_staging_test
class _UpperCAmelCase ( unittest.TestCase ):
a : str =["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def lowerCamelCase__ ( cls ):
'''simple docstring'''
__lowerCAmelCase = TOKEN
HfFolder.save_token(__SCREAMING_SNAKE_CASE )
@classmethod
def lowerCamelCase__ ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token,repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = WavaVecaProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__SCREAMING_SNAKE_CASE,"""test-processor""" ),push_to_hub=__SCREAMING_SNAKE_CASE,use_auth_token=self._token )
__lowerCAmelCase = WavaVecaProcessor.from_pretrained(f'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__SCREAMING_SNAKE_CASE,getattr(new_processor.feature_extractor,__SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab(),processor.tokenizer.get_vocab() )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = WavaVecaProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__SCREAMING_SNAKE_CASE,"""test-processor-org""" ),push_to_hub=__SCREAMING_SNAKE_CASE,use_auth_token=self._token,organization="""valid_org""",)
__lowerCAmelCase = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__SCREAMING_SNAKE_CASE,getattr(new_processor.feature_extractor,__SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab(),processor.tokenizer.get_vocab() )
def lowerCamelCase__ ( self ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
__lowerCAmelCase = CustomFeatureExtractor.from_pretrained(__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE,"""vocab.txt""" )
with open(__SCREAMING_SNAKE_CASE,"""w""",encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
__lowerCAmelCase = CustomTokenizer(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = CustomProcessor(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'{USER}/test-dynamic-processor',token=self._token )
__lowerCAmelCase = Repository(__SCREAMING_SNAKE_CASE,clone_from=f'{USER}/test-dynamic-processor',token=self._token )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map,{
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
},)
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__SCREAMING_SNAKE_CASE,"""tokenizer_config.json""" ) ) as f:
__lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE )
self.assertDictEqual(
tokenizer_config["""auto_map"""],{
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
},)
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE,"""custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE,"""custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE,"""custom_processing.py""" ) ) )
repo.push_to_hub()
__lowerCAmelCase = AutoProcessor.from_pretrained(f'{USER}/test-dynamic-processor',trust_remote_code=__SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__,"""CustomProcessor""" )
| 689 |
'''simple docstring'''
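# Project Euler 87: count the numbers below the given limit that can be written as the
# sum of a prime square, a prime cube and a prime fourth power.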
def _lowerCAmelCase ( lowercase = 5000_0000 ) -> int:
__lowerCAmelCase = set()
__lowerCAmelCase = int((limit - 24) ** (1 / 2) )
__lowerCAmelCase = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , lowercase ) ) )
for primea in primes:
__lowerCAmelCase = primea * primea
for primea in primes:
__lowerCAmelCase = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
__lowerCAmelCase = primea * primea * primea * primea
__lowerCAmelCase = square + cube + tetr
if total >= limit:
break
ret.add(lowercase )
return len(lowercase )
if __name__ == "__main__":
print(f'{solution() = }')
| 689 | 1 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_a : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_a : List[Any] = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir,"""models/bert/""" ) )
__lowerCAmelCase = self.transformer_dir
shutil.copy(
os.path.join(__SCREAMING_SNAKE_CASE,"""src/transformers/models/bert/modeling_bert.py""" ),os.path.join(self.transformer_dir,"""models/bert/modeling_bert.py""" ),)
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
__lowerCAmelCase = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
__lowerCAmelCase = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
__lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa},line_length=1_19 )
__lowerCAmelCase = black.format_str(__SCREAMING_SNAKE_CASE,mode=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = os.path.join(self.transformer_dir,"""new_code.py""" )
with open(__SCREAMING_SNAKE_CASE,"""w""",newline="""\n""" ) as f:
f.write(__SCREAMING_SNAKE_CASE )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__SCREAMING_SNAKE_CASE ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name,overwrite=__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE,"""r""" ) as f:
self.assertTrue(f.read(),__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""","""BertLMPredictionHead""",REFERENCE_CODE + """\n""",)
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""","""BertLMPredictionHead""",__SCREAMING_SNAKE_CASE,)
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""","""TestModelLMPredictionHead""",re.sub("""Bert""","""TestModel""",__SCREAMING_SNAKE_CASE ),)
# Copy consistency with a really long name
__lowerCAmelCase = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}',f'{long_class_name}LMPredictionHead',re.sub("""Bert""",__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ),)
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""","""TestModelLMPredictionHead""",__SCREAMING_SNAKE_CASE,overwrite_result=re.sub("""Bert""","""TestModel""",__SCREAMING_SNAKE_CASE ),)
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
__lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
__lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
__lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
__lowerCAmelCase , __lowerCAmelCase = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,localized_readme["""format_model_list"""] )
self.assertFalse(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
__lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
__lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
__lowerCAmelCase , __lowerCAmelCase = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
| 689 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : Optional[int] =TextToVideoSDPipeline
a : Optional[int] =TEXT_TO_IMAGE_PARAMS
a : Any =TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
a : Union[str, Any] =frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""),up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""),cross_attention_dim=32,attention_head_dim=4,)
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,)
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,sample_size=1_28,)
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,hidden_act="""gelu""",projection_dim=5_12,)
__lowerCAmelCase = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = TextToVideoSDPipeline(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """np"""
__lowerCAmelCase = sd_pipe(**__SCREAMING_SNAKE_CASE ).frames
__lowerCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
__lowerCAmelCase = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",)
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
__lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowerCAmelCase = pipe.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=25,output_type="""pt""" ).frames
__lowerCAmelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
__lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__lowerCAmelCase = pipe.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=2,output_type="""pt""" ).frames
__lowerCAmelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 689 | 1 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : str = logging.get_logger(__name__)
_a : List[Any] = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[str] ="""encodec"""
def __init__( self,__SCREAMING_SNAKE_CASE=[1.5, 3.0, 6.0, 12.0, 24.0],__SCREAMING_SNAKE_CASE=2_40_00,__SCREAMING_SNAKE_CASE=1,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=1_28,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE=1,__SCREAMING_SNAKE_CASE=[8, 5, 4, 2],__SCREAMING_SNAKE_CASE="weight_norm",__SCREAMING_SNAKE_CASE=7,__SCREAMING_SNAKE_CASE=7,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE="reflect",__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=1.0,__SCREAMING_SNAKE_CASE=10_24,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=True,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = target_bandwidths
__lowerCAmelCase = sampling_rate
__lowerCAmelCase = audio_channels
__lowerCAmelCase = normalize
__lowerCAmelCase = chunk_length_s
__lowerCAmelCase = overlap
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_filters
__lowerCAmelCase = num_residual_layers
__lowerCAmelCase = upsampling_ratios
__lowerCAmelCase = norm_type
__lowerCAmelCase = kernel_size
__lowerCAmelCase = last_kernel_size
__lowerCAmelCase = residual_kernel_size
__lowerCAmelCase = dilation_growth_rate
__lowerCAmelCase = use_causal_conv
__lowerCAmelCase = pad_mode
__lowerCAmelCase = compress
__lowerCAmelCase = num_lstm_layers
__lowerCAmelCase = trim_right_ratio
__lowerCAmelCase = codebook_size
__lowerCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
__lowerCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}' )
super().__init__(**__SCREAMING_SNAKE_CASE )
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1,int((1.0 - self.overlap) * self.chunk_length ) )
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
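# Illustrative usage sketch (an assumption: mirrors the upstream EncodecConfig API that this class
# follows; the example is not part of the original file and relies only on `math` and `np` imported above):
#
#   config = _UpperCAmelCase()                             # defaults correspond to the 24 kHz setup
#   hop_length = int(np.prod(config.upsampling_ratios))    # 8 * 5 * 4 * 2 = 320 samples per frame
#   math.ceil(config.sampling_rate / hop_length)           # 24000 / 320 -> 75 frames per second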
| 689 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _lowerCAmelCase ( lowercase ) -> Optional[int]:
if not is_accelerate_available():
return method
__lowerCAmelCase = version.parse(accelerate.__version__ ).base_version
if version.parse(lowercase ) < version.parse("""0.17.0""" ):
return method
def wrapper(self , *lowercase , **lowercase ):
if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
self._hf_hook.pre_forward(self )
return method(self , *lowercase , **lowercase )
return wrapper
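# Illustrative sketch (an assumption, not part of the original module) of how the decorator above is
# applied: it wraps a model method so that an attached accelerate hook's `pre_forward` runs before the call.
#
#   class TinyEncoder(torch.nn.Module):
#       @_lowerCAmelCase
#       def encode(self, x):
#           return x * 2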
| 689 | 1 |
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative ( img ) -> Tuple:
    # getting number of pixels in the image
    rows , cols = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(rows ):
        for j in range(cols ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
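# Worked example (illustrative, not part of the original file): a single white pixel inverts to black.
#   >>> import numpy as np
#   >>> convert_to_negative(np.array([[[255, 255, 255]]], dtype=np.int32))
#   array([[[0, 0, 0]]], dtype=int32)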
if __name__ == "__main__":
# read original image
    img = imread("""image_data/lena.jpg""", 1)
    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 689 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
# load base model
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained(lowercase , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
__lowerCAmelCase = load_file(lowercase )
__lowerCAmelCase = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # alpha has already been set beforehand, so just skip these entries
if ".alpha" in key or key in visited:
continue
if "text" in key:
__lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
__lowerCAmelCase = pipeline.text_encoder
else:
__lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
__lowerCAmelCase = pipeline.unet
# find the target layer
__lowerCAmelCase = layer_infos.pop(0 )
while len(lowercase ) > -1:
try:
__lowerCAmelCase = curr_layer.__getattr__(lowercase )
if len(lowercase ) > 0:
__lowerCAmelCase = layer_infos.pop(0 )
elif len(lowercase ) == 0:
break
except Exception:
if len(lowercase ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
__lowerCAmelCase = layer_infos.pop(0 )
__lowerCAmelCase = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(lowercase )
else:
pair_keys.append(lowercase )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
__lowerCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
__lowerCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase ).unsqueeze(2 ).unsqueeze(3 )
else:
__lowerCAmelCase = state_dict[pair_keys[0]].to(torch.floataa )
__lowerCAmelCase = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase )
# update visited list
for item in pair_keys:
visited.append(lowercase )
return pipeline
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
_a : Optional[int] = parser.parse_args()
_a : Dict = args.base_model_path
_a : Optional[Any] = args.checkpoint_path
_a : Union[str, Any] = args.dump_path
_a : Optional[int] = args.lora_prefix_unet
_a : int = args.lora_prefix_text_encoder
_a : str = args.alpha
_a : Any = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
_a : Tuple = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
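# Example invocation (illustrative; the script filename is an assumption and paths are placeholders):
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path <base-model-dir-or-hub-id> \
#       --checkpoint_path <lora>.safetensors \
#       --dump_path <output-dir> \
#       --lora_prefix_unet lora_unet \
#       --lora_prefix_text_encoder lora_te \
#       --alpha 0.75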
| 689 | 1 |
'''simple docstring'''
def and_gate ( input_a , input_b ) -> int:
    return int((input_a, input_b).count(0 ) == 0 )
def test_and_gate ( ) -> None:
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 689 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def _lowerCAmelCase ( lowercase = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def _lowerCAmelCase ( lowercase = "" ) -> bool:
if len(lowercase ) == 0:
return True
__lowerCAmelCase = input_str.replace(""" """ , """""" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__lowerCAmelCase = {}
for character in lower_case_input_str:
__lowerCAmelCase = character_freq_dict.get(lowercase , 0 ) + 1
__lowerCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _lowerCAmelCase ( lowercase = "" ) -> None:
print("""\nFor string = """ , lowercase , """:""" )
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
    check_str = input(
        """Enter string to determine if it can be rearranged as a palindrome or not: """
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 689 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_a : Any = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : str =["""pixel_values"""]
def __init__( self,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE = 1 / 2_55,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = True,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = size if size is not None else {"""shortest_edge""": 2_24}
__lowerCAmelCase = get_size_dict(__SCREAMING_SNAKE_CASE,default_to_square=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
__lowerCAmelCase = get_size_dict(__SCREAMING_SNAKE_CASE,default_to_square=__SCREAMING_SNAKE_CASE,param_name="""crop_size""" )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = do_center_crop
__lowerCAmelCase = crop_size
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowerCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD
__lowerCAmelCase = do_convert_rgb
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = get_size_dict(__SCREAMING_SNAKE_CASE,default_to_square=__SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
__lowerCAmelCase = get_resize_output_image_size(__SCREAMING_SNAKE_CASE,size=size["""shortest_edge"""],default_to_square=__SCREAMING_SNAKE_CASE )
return resize(__SCREAMING_SNAKE_CASE,size=__SCREAMING_SNAKE_CASE,resample=__SCREAMING_SNAKE_CASE,data_format=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(__SCREAMING_SNAKE_CASE,size=(size["""height"""], size["""width"""]),data_format=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
return rescale(__SCREAMING_SNAKE_CASE,scale=__SCREAMING_SNAKE_CASE,data_format=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
return normalize(__SCREAMING_SNAKE_CASE,mean=__SCREAMING_SNAKE_CASE,std=__SCREAMING_SNAKE_CASE,data_format=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = ChannelDimension.FIRST,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(__SCREAMING_SNAKE_CASE,param_name="""size""",default_to_square=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase = get_size_dict(__SCREAMING_SNAKE_CASE,param_name="""crop_size""",default_to_square=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowerCAmelCase = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowerCAmelCase = [convert_to_rgb(__SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
__lowerCAmelCase = [self.resize(image=__SCREAMING_SNAKE_CASE,size=__SCREAMING_SNAKE_CASE,resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
__lowerCAmelCase = [self.center_crop(image=__SCREAMING_SNAKE_CASE,size=__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(image=__SCREAMING_SNAKE_CASE,scale=__SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
__lowerCAmelCase = [self.normalize(image=__SCREAMING_SNAKE_CASE,mean=__SCREAMING_SNAKE_CASE,std=__SCREAMING_SNAKE_CASE ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) for image in images]
__lowerCAmelCase = {"""pixel_values""": images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE,tensor_type=__SCREAMING_SNAKE_CASE )
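# Illustrative usage sketch (an assumption: mirrors the upstream CLIP-style image processor API that the
# class above follows; the example is not part of the original file):
#
#   processor = _UpperCAmelCase()                                   # resize to shortest_edge=224, center-crop 224x224
#   batch = processor(PIL.Image.new("RGB", (640, 480)), return_tensors="np")
#   batch["pixel_values"].shape                                     # expected: (1, 3, 224, 224)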
| 689 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _lowerCAmelCase ( lowercase ) -> List[Any]:
__lowerCAmelCase = VideoMAEConfig()
set_architecture_configs(lowercase , lowercase )
if "finetuned" not in model_name:
__lowerCAmelCase = False
if "finetuned" in model_name:
__lowerCAmelCase = """huggingface/label-files"""
if "kinetics" in model_name:
__lowerCAmelCase = 400
__lowerCAmelCase = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
__lowerCAmelCase = 174
__lowerCAmelCase = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
__lowerCAmelCase = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) )
__lowerCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
def _lowerCAmelCase ( lowercase , lowercase ) -> Any:
if "small" in model_name:
__lowerCAmelCase = 384
__lowerCAmelCase = 1536
__lowerCAmelCase = 12
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 3
__lowerCAmelCase = 192
__lowerCAmelCase = 768
elif "large" in model_name:
__lowerCAmelCase = 1024
__lowerCAmelCase = 4096
__lowerCAmelCase = 24
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 8
__lowerCAmelCase = 512
__lowerCAmelCase = 2048
elif "huge" in model_name:
__lowerCAmelCase = 1280
__lowerCAmelCase = 5120
__lowerCAmelCase = 32
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 8
__lowerCAmelCase = 640
__lowerCAmelCase = 2560
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def _lowerCAmelCase ( lowercase ) -> List[str]:
if "encoder." in name:
__lowerCAmelCase = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
__lowerCAmelCase = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
__lowerCAmelCase = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
__lowerCAmelCase = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowerCAmelCase = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__lowerCAmelCase = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
__lowerCAmelCase = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
__lowerCAmelCase = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
__lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
__lowerCAmelCase = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
__lowerCAmelCase = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
__lowerCAmelCase = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
__lowerCAmelCase = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
__lowerCAmelCase = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__lowerCAmelCase = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__lowerCAmelCase = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
__lowerCAmelCase = name.replace("""head""" , """classifier""" )
return name
def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]:
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(lowercase )
if key.startswith("""encoder.""" ):
__lowerCAmelCase = key.replace("""encoder.""" , """""" )
if "qkv" in key:
__lowerCAmelCase = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
__lowerCAmelCase = config.decoder_hidden_size
__lowerCAmelCase = int(key_split[2] )
__lowerCAmelCase = """decoder.decoder_layers."""
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = config.hidden_size
__lowerCAmelCase = int(key_split[1] )
__lowerCAmelCase = """videomae.encoder.layer."""
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val
return orig_state_dict
def _lowerCAmelCase ( ) -> str:
__lowerCAmelCase = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__lowerCAmelCase = np.load(lowercase )
return list(lowercase )
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = get_videomae_config(lowercase )
if "finetuned" in model_name:
__lowerCAmelCase = VideoMAEForVideoClassification(lowercase )
else:
__lowerCAmelCase = VideoMAEForPreTraining(lowercase )
# download original checkpoint, hosted on Google Drive
__lowerCAmelCase = """pytorch_model.bin"""
gdown.cached_download(lowercase , lowercase , quiet=lowercase )
__lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )
if "model" in files:
__lowerCAmelCase = files["""model"""]
else:
__lowerCAmelCase = files["""module"""]
__lowerCAmelCase = convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
# verify model on basic input
__lowerCAmelCase = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__lowerCAmelCase = prepare_video()
__lowerCAmelCase = image_processor(lowercase , return_tensors="""pt""" )
if "finetuned" not in model_name:
__lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
__lowerCAmelCase = torch.load(lowercase )
__lowerCAmelCase = model(**lowercase )
__lowerCAmelCase = outputs.logits
__lowerCAmelCase = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
__lowerCAmelCase = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 400] )
__lowerCAmelCase = torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
__lowerCAmelCase = torch.Size([1, 1408, 1536] )
__lowerCAmelCase = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 174] )
__lowerCAmelCase = torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(f'Model name not supported. Should be one of {model_names}' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , lowercase , atol=1e-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__lowerCAmelCase = outputs.loss
assert torch.allclose(lowercase , lowercase , atol=1e-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase )
model.save_pretrained(lowercase )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase , organization="""nielsr""" )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_a : int = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
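# Example invocation (illustrative; the script filename is an assumption):
#
#   python convert_videomae_to_pytorch.py \
#       --checkpoint_url "<google-drive-download-link>" \
#       --pytorch_dump_folder_path ./videomae-base-finetuned-kinetics \
#       --model_name videomae-base-finetuned-kinetics \
#       --push_to_hub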
| 689 | 1 |
'''simple docstring'''
import datasets
_a : Tuple = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
_a : Any = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_a : List[Any] = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple:
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ),codebase_urls=[],reference_urls=[],format="""numpy""",)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return {"accuracy": simple_accuracy(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )}
| 689 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_a : Tuple = """\
"""
_a : Tuple = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_a : Optional[Any] = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ),reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 16,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__lowerCAmelCase = """cuda"""
else:
__lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__SCREAMING_SNAKE_CASE ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__lowerCAmelCase = model.config.max_length - 1
else:
__lowerCAmelCase = model.config.max_length
__lowerCAmelCase = tokenizer(
__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,max_length=__SCREAMING_SNAKE_CASE,return_tensors="""pt""",return_attention_mask=__SCREAMING_SNAKE_CASE,).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = encodings["""input_ids"""]
__lowerCAmelCase = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ),1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ),2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__lowerCAmelCase = []
__lowerCAmelCase = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0,len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase = min(start_index + batch_size,len(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = encoded_texts[start_index:end_index]
__lowerCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
__lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch],dim=1 )
__lowerCAmelCase = torch.cat(
[torch.ones(bos_tokens_tensor.size(),dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask],dim=1 )
__lowerCAmelCase = encoded_batch
with torch.no_grad():
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE ).logits
__lowerCAmelCase = out_logits[..., :-1, :].contiguous()
__lowerCAmelCase = labels[..., 1:].contiguous()
__lowerCAmelCase = attn_mask[..., 1:].contiguous()
__lowerCAmelCase = torch.expa(
(loss_fct(shift_logits.transpose(1,2 ),__SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
| 689 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
a : Optional[Any] =CycleDiffusionPipeline
a : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""negative_prompt""",
"""height""",
"""width""",
"""negative_prompt_embeds""",
}
a : List[str] =PipelineTesterMixin.required_optional_params - {"""latents"""}
a : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""} )
a : Union[str, Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS
a : Union[str, Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""),up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""),cross_attention_dim=32,)
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",num_train_timesteps=10_00,clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,)
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,)
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,)
__lowerCAmelCase = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
__lowerCAmelCase = floats_tensor((1, 3, 32, 32),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = image / 2 + 0.5
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = CycleDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = output.images
__lowerCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__lowerCAmelCase = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != """cuda""","""This test requires a GPU""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(__SCREAMING_SNAKE_CASE,"""half""" ):
__lowerCAmelCase = module.half()
__lowerCAmelCase = CycleDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = output.images
__lowerCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__lowerCAmelCase = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_inference_batch_single_identical()
@skip_mps
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
__lowerCAmelCase = init_image.resize((5_12, 5_12) )
__lowerCAmelCase = """CompVis/stable-diffusion-v1-4"""
__lowerCAmelCase = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE,subfolder="""scheduler""" )
__lowerCAmelCase = CycleDiffusionPipeline.from_pretrained(
__SCREAMING_SNAKE_CASE,scheduler=__SCREAMING_SNAKE_CASE,safety_checker=__SCREAMING_SNAKE_CASE,torch_dtype=torch.floataa,revision="""fp16""" )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
__lowerCAmelCase = """A black colored car"""
__lowerCAmelCase = """A blue colored car"""
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(
prompt=__SCREAMING_SNAKE_CASE,source_prompt=__SCREAMING_SNAKE_CASE,image=__SCREAMING_SNAKE_CASE,num_inference_steps=1_00,eta=0.1,strength=0.85,guidance_scale=3,source_guidance_scale=1,generator=__SCREAMING_SNAKE_CASE,output_type="""np""",)
__lowerCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
__lowerCAmelCase = init_image.resize((5_12, 5_12) )
__lowerCAmelCase = """CompVis/stable-diffusion-v1-4"""
__lowerCAmelCase = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE,subfolder="""scheduler""" )
__lowerCAmelCase = CycleDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE,scheduler=__SCREAMING_SNAKE_CASE,safety_checker=__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
__lowerCAmelCase = """A black colored car"""
__lowerCAmelCase = """A blue colored car"""
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(
prompt=__SCREAMING_SNAKE_CASE,source_prompt=__SCREAMING_SNAKE_CASE,image=__SCREAMING_SNAKE_CASE,num_inference_steps=1_00,eta=0.1,strength=0.85,guidance_scale=3,source_guidance_scale=1,generator=__SCREAMING_SNAKE_CASE,output_type="""np""",)
__lowerCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 689 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Union[str, Any] =["""image_processor"""]
a : Dict ="""SamImageProcessor"""
def __init__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.image_processor
__lowerCAmelCase = -10
__lowerCAmelCase = self.image_processor.size["""longest_edge"""]
def __call__( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = self.image_processor(
__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
# pop arguments that are not used in the foward but used nevertheless
__lowerCAmelCase = encoding_image_processor["""original_sizes"""]
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks if Torch or TF tensor
__lowerCAmelCase = original_sizes.numpy()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._check_and_preprocess_points(
input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = self._normalize_and_convert(
__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,input_points=__SCREAMING_SNAKE_CASE,input_labels=__SCREAMING_SNAKE_CASE,input_boxes=__SCREAMING_SNAKE_CASE,return_tensors=__SCREAMING_SNAKE_CASE,)
return encoding_image_processor
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="pt",):
'''simple docstring'''
if input_points is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0] ) for point in input_points
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
for point, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__lowerCAmelCase , __lowerCAmelCase = self._pad_points_and_labels(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_labels is not None:
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,original_sizes[0],is_bounding_box=__SCREAMING_SNAKE_CASE )
for box in input_boxes
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,is_bounding_box=__SCREAMING_SNAKE_CASE )
for box, original_size in zip(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
]
__lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
__lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE,1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = max([point.shape[0] for point in input_points] )
__lowerCAmelCase = []
for i, point in enumerate(__SCREAMING_SNAKE_CASE ):
if point.shape[0] != expected_nb_points:
__lowerCAmelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 )
__lowerCAmelCase = np.append(input_labels[i],[self.point_pad_value] )
processed_input_points.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = processed_input_points
return input_points, input_labels
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = original_size
__lowerCAmelCase , __lowerCAmelCase = self.image_processor._get_preprocess_shape(__SCREAMING_SNAKE_CASE,longest_edge=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = deepcopy(__SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE )
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1,2,2 )
__lowerCAmelCase = coords[..., 0] * (new_w / old_w)
__lowerCAmelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1,4 )
return coords
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,):
'''simple docstring'''
if input_points is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ): # Checks for TF or Torch tensor
__lowerCAmelCase = input_points.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0],__SCREAMING_SNAKE_CASE ):
raise ValueError("""Input points must be a list of list of floating points.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for input_point in input_points]
else:
__lowerCAmelCase = None
if input_labels is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
__lowerCAmelCase = input_labels.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0],__SCREAMING_SNAKE_CASE ):
raise ValueError("""Input labels must be a list of list integers.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ) for label in input_labels]
else:
__lowerCAmelCase = None
if input_boxes is not None:
if hasattr(__SCREAMING_SNAKE_CASE,"""numpy""" ):
__lowerCAmelCase = input_boxes.numpy().tolist()
if (
not isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0],__SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0][0],__SCREAMING_SNAKE_CASE )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
__lowerCAmelCase = [np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) for box in input_boxes]
else:
__lowerCAmelCase = None
return input_points, input_labels, input_boxes
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(__SCREAMING_SNAKE_CASE ) )
def lowerCamelCase__ ( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.image_processor.post_process_masks(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
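# Illustrative sketch of the prompt nesting the validation helpers above expect
# (placeholder values, not part of the original file):
#
#   input_points = [[[450.0, 600.0]]]                 # one image, one (x, y) point
#   input_labels = [[1]]                              # matching foreground label per point
#   input_boxes  = [[[75.0, 275.0, 1725.0, 850.0]]]   # one image, one (x0, y0, x1, y1) box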
| 689 | 1 |
'''simple docstring'''
def fizz_buzz ( number , iterations ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError("""iterations must be defined as integers""" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError("""starting number must be an integer and be more than 0""" )
    if not iterations >= 1:
        raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
    out = """"""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
            # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 689 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
_a : int = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
a : Optional[str] =field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
a : Optional[str] =field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
a : int =field(
default=10_24 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
a : bool =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
a : Optional[int] =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the training data."""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the validation data."""} )
a : Optional[str] =field(default=lowerCAmelCase_ , metadata={"""help""": """A csv or a json file containing the test data."""} )
def lowerCamelCase__ ( self ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
__lowerCAmelCase = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__lowerCAmelCase = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class _UpperCAmelCase :
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a : Optional[str] =field(
default=lowerCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a : str =field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def _lowerCAmelCase ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(lowercase )
datasets.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__lowerCAmelCase = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__lowerCAmelCase = data_args.train_file.split(""".""" )[-1]
__lowerCAmelCase = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__lowerCAmelCase = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
__lowerCAmelCase = load_dataset("""csv""" , data_files=lowercase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__lowerCAmelCase = load_dataset("""json""" , data_files=lowercase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__lowerCAmelCase = raw_datasets["""train"""].features["""label"""].names
__lowerCAmelCase = len(lowercase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__lowerCAmelCase = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase , )
__lowerCAmelCase = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__lowerCAmelCase = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__lowerCAmelCase = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__lowerCAmelCase = {"""Refused""": 0, """Entailed""": 1}
__lowerCAmelCase = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
__lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowercase ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowercase ):
__lowerCAmelCase = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
__lowerCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
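        # Hedged illustration of the assumed tab_fact table_text layout handled
        # above: "col1#col2\nfoo#bar\nbaz#qux" becomes a DataFrame with columns
        # ["col1", "col2"] and records [["foo", "bar"], ["baz", "qux"]].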
__lowerCAmelCase = examples["""statement"""]
__lowerCAmelCase = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
__lowerCAmelCase = tokenizer(lowercase , lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase )
__lowerCAmelCase = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
__lowerCAmelCase = raw_datasets.map(
lowercase , batched=lowercase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__lowerCAmelCase = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__lowerCAmelCase = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__lowerCAmelCase = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__lowerCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
__lowerCAmelCase = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
__lowerCAmelCase = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowercase ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowercase ):
__lowerCAmelCase = p.predictions[0] if isinstance(p.predictions , lowercase ) else p.predictions
__lowerCAmelCase = np.argmax(lowercase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__lowerCAmelCase = default_data_collator
elif training_args.fpaa:
__lowerCAmelCase = DataCollatorWithPadding(lowercase , pad_to_multiple_of=8 )
else:
__lowerCAmelCase = None
# Initialize our Trainer
__lowerCAmelCase = Trainer(
model=lowercase , args=lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase , tokenizer=lowercase , data_collator=lowercase , )
# Training
if training_args.do_train:
__lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
__lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCAmelCase = last_checkpoint
__lowerCAmelCase = trainer.train(resume_from_checkpoint=lowercase )
__lowerCAmelCase = train_result.metrics
__lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase )
)
__lowerCAmelCase = min(lowercase , len(lowercase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , lowercase )
trainer.save_metrics("""train""" , lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowerCAmelCase = trainer.evaluate(eval_dataset=lowercase )
__lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase )
__lowerCAmelCase = min(lowercase , len(lowercase ) )
trainer.log_metrics("""eval""" , lowercase )
trainer.save_metrics("""eval""" , lowercase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
__lowerCAmelCase = predict_dataset.remove_columns("""label""" )
__lowerCAmelCase = trainer.predict(lowercase , metric_key_prefix="""predict""" ).predictions
__lowerCAmelCase = np.argmax(lowercase , axis=1 )
__lowerCAmelCase = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(lowercase , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(lowercase ):
__lowerCAmelCase = label_list[item]
writer.write(f'{index}\t{item}\n' )
__lowerCAmelCase = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase )
else:
trainer.create_model_card(**lowercase )
def _lowerCAmelCase ( lowercase ) -> str:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 689 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=10,__SCREAMING_SNAKE_CASE=[10, 20, 30, 40],__SCREAMING_SNAKE_CASE=[1, 1, 2, 1],__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE="relu",__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=None,):
'''simple docstring'''
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = embeddings_size
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = depths
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_labels
__lowerCAmelCase = scope
__lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size],self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels,embeddings_size=self.embeddings_size,hidden_sizes=self.hidden_sizes,depths=self.depths,hidden_act=self.hidden_act,num_labels=self.num_labels,)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = RegNetModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = RegNetForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
a : Tuple =(RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
a : int =(
{"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
if is_torch_available()
else {}
)
a : Dict =False
a : Union[str, Any] =False
a : Tuple =False
a : str =False
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = RegNetModelTester(self )
__lowerCAmelCase = ConfigTester(self,config_class=__SCREAMING_SNAKE_CASE,has_text_modality=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ ( self ):
'''simple docstring'''
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1],__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(config=__SCREAMING_SNAKE_CASE )
for name, module in model.named_modules():
if isinstance(__SCREAMING_SNAKE_CASE,(nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ),msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
self.assertTrue(
torch.all(module.bias == 0 ),msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
def lowerCamelCase__ ( self ):
'''simple docstring'''
def check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[self.model_tester.image_size // 2, self.model_tester.image_size // 2],)
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__lowerCAmelCase = layer_type
__lowerCAmelCase = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = RegNetModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( ) -> str:
__lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
__lowerCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__SCREAMING_SNAKE_CASE,atol=1e-4 ) )
| 689 |
'''simple docstring'''
import os
import sys
import unittest
_a : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_a : Union[str, Any] = os.path.join(git_repo_path, """src""", """diffusers""")
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = find_backend(""" if not is_torch_available():""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__lowerCAmelCase = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__lowerCAmelCase = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""torch_and_transformers_and_onnx""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""",__SCREAMING_SNAKE_CASE )
self.assertIn("""torch_and_transformers""",__SCREAMING_SNAKE_CASE )
self.assertIn("""flax_and_transformers""",__SCREAMING_SNAKE_CASE )
self.assertIn("""torch_and_transformers_and_onnx""",__SCREAMING_SNAKE_CASE )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""",objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""",objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""",objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""",objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""",objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""",objects["""torch_and_transformers_and_onnx"""] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = create_dummy_object("""CONSTANT""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,"""\nCONSTANT = None\n""" )
__lowerCAmelCase = create_dummy_object("""function""","""'torch'""" )
self.assertEqual(
__SCREAMING_SNAKE_CASE,"""\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
__lowerCAmelCase = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
__lowerCAmelCase = create_dummy_object("""FakeClass""","""'torch'""" )
self.assertEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
__lowerCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""],__SCREAMING_SNAKE_CASE )
| 689 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=13,__SCREAMING_SNAKE_CASE=7,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=99,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE=5,__SCREAMING_SNAKE_CASE=4,__SCREAMING_SNAKE_CASE=37,__SCREAMING_SNAKE_CASE="gelu",__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=5_12,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=4,__SCREAMING_SNAKE_CASE=None,):
'''simple docstring'''
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size],self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size],self.num_choices )
__lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self ):
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__SCREAMING_SNAKE_CASE,initializer_range=self.initializer_range,use_stable_embedding=__SCREAMING_SNAKE_CASE,)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = OpenLlamaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,encoder_hidden_states=__SCREAMING_SNAKE_CASE,encoder_attention_mask=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,encoder_hidden_states=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = OpenLlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = OpenLlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
# first forward pass
__lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,encoder_hidden_states=__SCREAMING_SNAKE_CASE,encoder_attention_mask=__SCREAMING_SNAKE_CASE,use_cache=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
__lowerCAmelCase = ids_tensor((self.batch_size, 3),config.vocab_size )
__lowerCAmelCase = ids_tensor((self.batch_size, 3),vocab_size=2 )
        # append to next input_ids and attention_mask
__lowerCAmelCase = torch.cat([input_ids, next_tokens],dim=-1 )
__lowerCAmelCase = torch.cat([input_mask, next_mask],dim=-1 )
__lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,encoder_hidden_states=__SCREAMING_SNAKE_CASE,encoder_attention_mask=__SCREAMING_SNAKE_CASE,output_hidden_states=__SCREAMING_SNAKE_CASE,)["""hidden_states"""][0]
__lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,encoder_hidden_states=__SCREAMING_SNAKE_CASE,encoder_attention_mask=__SCREAMING_SNAKE_CASE,past_key_values=__SCREAMING_SNAKE_CASE,output_hidden_states=__SCREAMING_SNAKE_CASE,)["""hidden_states"""][0]
# select random slice
__lowerCAmelCase = ids_tensor((1,),output_from_past.shape[-1] ).item()
__lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,atol=1e-3 ) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.prepare_config_and_inputs()
        (
            __lowerCAmelCase ,
            __lowerCAmelCase ,
            __lowerCAmelCase ,
            __lowerCAmelCase ,
            __lowerCAmelCase ,
            __lowerCAmelCase ,
            __lowerCAmelCase ,
        ) = config_and_inputs
__lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
a : List[str] =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
a : int =(OpenLlamaForCausalLM,) if is_torch_available() else ()
a : List[str] =(
{
"""feature-extraction""": OpenLlamaModel,
"""text-classification""": OpenLlamaForSequenceClassification,
"""text-generation""": OpenLlamaForCausalLM,
"""zero-shot""": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
a : List[str] =False
a : Optional[Any] =False
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = OpenLlamaModelTester(self )
__lowerCAmelCase = ConfigTester(self,config_class=__SCREAMING_SNAKE_CASE,hidden_size=37 )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase = type
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = 3
__lowerCAmelCase = input_dict["""input_ids"""]
__lowerCAmelCase = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = ids_tensor([self.model_tester.batch_size],self.model_tester.type_sequence_label_size )
__lowerCAmelCase = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape,(self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = 3
__lowerCAmelCase = """single_label_classification"""
__lowerCAmelCase = input_dict["""input_ids"""]
__lowerCAmelCase = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = ids_tensor([self.model_tester.batch_size],self.model_tester.type_sequence_label_size )
__lowerCAmelCase = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape,(self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = 3
__lowerCAmelCase = """multi_label_classification"""
__lowerCAmelCase = input_dict["""input_ids"""]
__lowerCAmelCase = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels],self.model_tester.type_sequence_label_size ).to(torch.float )
__lowerCAmelCase = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = ids_tensor([1, 10],config.vocab_size )
__lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )],config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowerCAmelCase = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
original_model.to(__SCREAMING_SNAKE_CASE )
original_model.eval()
__lowerCAmelCase = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
__lowerCAmelCase = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowerCAmelCase = {"""type""": scaling_type, """factor""": 10.0}
__lowerCAmelCase = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
scaled_model.to(__SCREAMING_SNAKE_CASE )
scaled_model.eval()
__lowerCAmelCase = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
__lowerCAmelCase = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,atol=1e-5 ) )
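        # Hedged note on the assertions above: with {"type": "linear", "factor": f}
        # the rotary position indices are effectively divided by f for every input,
        # so even the short sequence's hidden states move; with {"type": "dynamic",
        # ...} the RoPE base is only rescaled once the input grows past
        # max_position_embeddings, which is why the short-input outputs are
        # expected to remain numerically close here.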
| 689 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase ) -> tuple[int, int]:
try:
__lowerCAmelCase = float(lowercase )
except ValueError:
raise ValueError("""Please enter a valid number""" )
__lowerCAmelCase = decimal - int(lowercase )
if fractional_part == 0:
return int(lowercase ), 1
else:
__lowerCAmelCase = len(str(lowercase ).split(""".""" )[1] )
__lowerCAmelCase = int(decimal * (10**number_of_frac_digits) )
__lowerCAmelCase = 10**number_of_frac_digits
__lowerCAmelCase , __lowerCAmelCase = denominator, numerator
while True:
__lowerCAmelCase = dividend % divisor
if remainder == 0:
break
__lowerCAmelCase , __lowerCAmelCase = divisor, remainder
__lowerCAmelCase , __lowerCAmelCase = numerator / divisor, denominator / divisor
return int(lowercase ), int(lowercase )
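# Hedged worked example of the reduction above: 6.25 has two fractional digits,
# so the numerator starts as 625 and the denominator as 100; the Euclidean loop
# finds gcd(625, 100) = 25 and both values are divided by it, so the function
# returns the pair (25, 4).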
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
| 689 | 1 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
_a : Dict = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
_a : Any = f'https://www.google.com/search?q={query}&num=100'
_a : Any = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
_a : List[str] = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
_a : List[Any] = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
| 689 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_a : Dict = _symbol_database.Default()
_a : Union[str, Any] = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_a : str = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_a : str = None
_a : Union[str, Any] = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_a : Optional[int] = 4_5
_a : List[Any] = 1_5_8_1
_a : str = 1_5_1_7
_a : Optional[Any] = 1_5_7_0
_a : List[str] = 1_5_8_4
_a : List[Any] = 1_7_9_3
_a : Union[str, Any] = 1_7_9_5
_a : Tuple = 1_9_1_6
_a : List[Any] = 1_8_6_4
_a : Any = 1_9_0_5
_a : Optional[Any] = 1_9_1_9
_a : Optional[int] = 2_4_2_9
_a : Tuple = 2_2_0_8
_a : Optional[Any] = 2_4_1_8
_a : List[Any] = 2_3_2_3
_a : str = 2_4_0_7
# @@protoc_insertion_point(module_scope)
| 689 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase ) -> tuple[int, int]:
try:
__lowerCAmelCase = float(lowercase )
except ValueError:
raise ValueError("""Please enter a valid number""" )
__lowerCAmelCase = decimal - int(lowercase )
if fractional_part == 0:
return int(lowercase ), 1
else:
__lowerCAmelCase = len(str(lowercase ).split(""".""" )[1] )
__lowerCAmelCase = int(decimal * (10**number_of_frac_digits) )
__lowerCAmelCase = 10**number_of_frac_digits
__lowerCAmelCase , __lowerCAmelCase = denominator, numerator
while True:
__lowerCAmelCase = dividend % divisor
if remainder == 0:
break
__lowerCAmelCase , __lowerCAmelCase = divisor, remainder
__lowerCAmelCase , __lowerCAmelCase = numerator / divisor, denominator / divisor
return int(lowercase ), int(lowercase )
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
| 689 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : torch.FloatTensor
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE=True,):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = torch.nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[0],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
# down
__lowerCAmelCase = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_down_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,add_downsample=not is_final_block,resnet_eps=1e-6,downsample_padding=0,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""",attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# out
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = 2 * out_channels if double_z else out_channels
__lowerCAmelCase = nn.Convad(block_out_channels[-1],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
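        # Hedged shape note: every down block except the final one halves the
        # spatial resolution, so with N entries in block_out_channels the encoder
        # output is roughly H / 2**(N - 1) x W / 2**(N - 1); it carries
        # 2 * out_channels channels when double_z is True (assumed to be the
        # mean / log-variance split of a VAE posterior) and out_channels otherwise.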
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = x
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""","""1.11.0""" ):
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
__lowerCAmelCase = down_block(__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE="group",):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[-1],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
__lowerCAmelCase = in_channels if norm_type == """spatial""" else None
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type,attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# up
__lowerCAmelCase = list(reversed(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = reversed_block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_up_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block + 1,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,prev_output_channel=__SCREAMING_SNAKE_CASE,add_upsample=not is_final_block,resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,resnet_time_scale_shift=__SCREAMING_SNAKE_CASE,)
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = output_channel
# out
if norm_type == "spatial":
__lowerCAmelCase = SpatialNorm(block_out_channels[0],__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = nn.Convad(block_out_channels[0],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
__lowerCAmelCase = z
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""","""1.11.0""" ):
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = up_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
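# Vector quantizer (VQ-VAE style): snaps each latent vector to its nearest codebook embedding,
# with an optional remapping of raw indices onto a restricted set of "used" codes.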
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="random",__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = n_e
__lowerCAmelCase = vq_embed_dim
__lowerCAmelCase = beta
__lowerCAmelCase = legacy
__lowerCAmelCase = nn.Embedding(self.n_e,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e,1.0 / self.n_e )
__lowerCAmelCase = remap
if self.remap is not None:
self.register_buffer("""used""",torch.tensor(np.load(self.remap ) ) )
__lowerCAmelCase = self.used.shape[0]
__lowerCAmelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowerCAmelCase = self.re_embed
__lowerCAmelCase = self.re_embed + 1
print(
f'Remapping {self.n_e} indices to {self.re_embed} indices. '
f'Using {self.unknown_index} for unknown indices.' )
else:
__lowerCAmelCase = n_e
__lowerCAmelCase = sane_index_shape
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = (inds[:, :, None] == used[None, None, ...]).long()
__lowerCAmelCase = match.argmax(-1 )
__lowerCAmelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
__lowerCAmelCase = torch.randint(0,self.re_embed,size=new[unknown].shape ).to(device=new.device )
else:
__lowerCAmelCase = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
__lowerCAmelCase = 0 # simply set to zero
__lowerCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :],1,__SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
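    # Forward pass: nearest-neighbour lookup in the codebook, commitment loss weighted by beta, and a
    # straight-through estimator (z + (z_q - z).detach()) so gradients still reach the encoder.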
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = z.permute(0,2,3,1 ).contiguous()
__lowerCAmelCase = z.view(-1,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__lowerCAmelCase = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE,self.embedding.weight ),dim=1 )
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
__lowerCAmelCase = None
__lowerCAmelCase = None
# compute loss for embedding
if not self.legacy:
__lowerCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__lowerCAmelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__lowerCAmelCase = z + (z_q - z).detach()
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
if self.remap is not None:
__lowerCAmelCase = min_encoding_indices.reshape(z.shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.remap_to_used(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = min_encoding_indices.reshape(-1,1 ) # flatten
if self.sane_index_shape:
__lowerCAmelCase = min_encoding_indices.reshape(z_q.shape[0],z_q.shape[2],z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if self.remap is not None:
__lowerCAmelCase = indices.reshape(shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
__lowerCAmelCase = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
return z_q
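# Diagonal Gaussian posterior: `parameters` holds mean and log-variance concatenated along the channel
# dim; supports reparameterized sampling, KL divergence, negative log-likelihood, and the mode (mean).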
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = parameters
__lowerCAmelCase , __lowerCAmelCase = torch.chunk(__SCREAMING_SNAKE_CASE,2,dim=1 )
__lowerCAmelCase = torch.clamp(self.logvar,-30.0,20.0 )
__lowerCAmelCase = deterministic
__lowerCAmelCase = torch.exp(0.5 * self.logvar )
__lowerCAmelCase = torch.exp(self.logvar )
if self.deterministic:
__lowerCAmelCase = __lowerCAmelCase = torch.zeros_like(
self.mean,device=self.parameters.device,dtype=self.parameters.dtype )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
__lowerCAmelCase = randn_tensor(
self.mean.shape,generator=__SCREAMING_SNAKE_CASE,device=self.parameters.device,dtype=self.parameters.dtype )
__lowerCAmelCase = self.mean + self.std * sample
return x
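    # KL divergence to a standard normal (other=None) or to another diagonal Gaussian, summed over C, H, W.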
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean,2 ) + self.var - 1.0 - self.logvar,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar,dim=[1, 2, 3],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
__lowerCAmelCase = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean,2 ) / self.var,dim=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.mean
| 689 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : Tuple =VideoToVideoSDPipeline
a : Optional[int] =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
a : List[Any] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
a : int =PipelineTesterMixin.required_optional_params - {"""latents"""}
a : Dict =False
# No `output_type`.
a : List[Any] =frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""),up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""),cross_attention_dim=32,attention_head_dim=4,)
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,)
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,sample_size=1_28,)
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,hidden_act="""gelu""",projection_dim=5_12,)
__lowerCAmelCase = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
__lowerCAmelCase = floats_tensor((1, 3, 3, 32, 32),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""video""": video,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
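    # Smoke test: run the tiny video-to-video pipeline end to end on CPU and compare a 3x3 slice
    # of the first output frame against reference values.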
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = VideoToVideoSDPipeline(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """np"""
__lowerCAmelCase = sd_pipe(**__SCREAMING_SNAKE_CASE ).frames
__lowerCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__lowerCAmelCase = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",)
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=5e-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""",torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = torch.randn((1, 10, 3, 10_24, 5_76),generator=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = video.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,video=__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=3,output_type="""pt""" ).frames
__lowerCAmelCase = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 689 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
_a : Optional[int] = logging.get_logger(__name__)
_a : int = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_a : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCAmelCase :
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """Model type selected in the list: """ + """, """.join(lowerCAmelCase_ )} )
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
a : int =field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a : int =field(
default=1_28 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
a : int =field(
default=64 , metadata={
"""help""": (
"""The maximum number of tokens for the question. Questions longer than this will """
"""be truncated to this length."""
)
} , )
a : int =field(
default=30 , metadata={
"""help""": (
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
)
} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
a : float =field(
default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a : int =field(
default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a : int =field(
default=0 , metadata={
"""help""": (
"""language id of input for language-specific xlm models (see"""
""" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
)
} , )
a : int =field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Optional[Any] ="""train"""
a : Optional[int] ="""dev"""
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : SquadDataTrainingArguments
a : List[SquadFeatures]
a : Split
a : bool
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = Split.train,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = "pt",):
'''simple docstring'''
__lowerCAmelCase = args
__lowerCAmelCase = is_language_sensitive
__lowerCAmelCase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
try:
__lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
__lowerCAmelCase = mode
# Load data features from cache or dataset file
__lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1"""
__lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCAmelCase = cached_features_file + """.lock"""
with FileLock(__SCREAMING_SNAKE_CASE ):
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache:
__lowerCAmelCase = time.time()
__lowerCAmelCase = torch.load(__SCREAMING_SNAKE_CASE )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__lowerCAmelCase = self.old_features["""features"""]
__lowerCAmelCase = self.old_features.get("""dataset""",__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.old_features.get("""examples""",__SCREAMING_SNAKE_CASE )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
""" future run""" )
else:
if mode == Split.dev:
__lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
else:
__lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
__lowerCAmelCase , __lowerCAmelCase = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__SCREAMING_SNAKE_CASE,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples},__SCREAMING_SNAKE_CASE,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
'''simple docstring'''
return len(self.features )
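    # Build the model inputs for a single cached feature, adjusting the keys to the selected model type.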
def __getitem__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.features[i]
__lowerCAmelCase = torch.tensor(feature.input_ids,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.attention_mask,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.token_type_ids,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.cls_index,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.p_mask,dtype=torch.float )
__lowerCAmelCase = torch.tensor(feature.is_impossible,dtype=torch.float )
__lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
__lowerCAmelCase = torch.tensor(feature.start_position,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
| 689 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
__lowerCAmelCase = []
__lowerCAmelCase = set({"""(""", """[""", """{"""} )
__lowerCAmelCase = set({""")""", """]""", """}"""} )
__lowerCAmelCase = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(lowercase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(lowercase ) == 0 or (len(lowercase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(lowercase ) == 0
def _lowerCAmelCase ( ) -> str:
__lowerCAmelCase = input("""Enter sequence of brackets: """ )
if is_balanced(lowercase ):
print(lowercase , """is balanced""" )
else:
print(lowercase , """is not balanced""" )
if __name__ == "__main__":
main()
| 689 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
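# Map a single parameter name from the original GroupViT checkpoint onto the Hugging Face module layout
# (vision encoder, text encoder, and projection heads).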
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
# vision encoder
if "img_encoder.pos_embed" in name:
__lowerCAmelCase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
__lowerCAmelCase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
__lowerCAmelCase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
__lowerCAmelCase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
__lowerCAmelCase = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
__lowerCAmelCase = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
__lowerCAmelCase = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
__lowerCAmelCase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
__lowerCAmelCase = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
__lowerCAmelCase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
__lowerCAmelCase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
__lowerCAmelCase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
__lowerCAmelCase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
__lowerCAmelCase = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
__lowerCAmelCase = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
__lowerCAmelCase = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
__lowerCAmelCase = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
__lowerCAmelCase = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
__lowerCAmelCase = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
__lowerCAmelCase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
__lowerCAmelCase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
__lowerCAmelCase = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
__lowerCAmelCase = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
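# Rewrite the full state dict: split fused qkv / in_proj weights into separate q, k, v tensors,
# rename the remaining keys, and squeeze the projection weights where needed.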
def _lowerCAmelCase ( lowercase , lowercase ) -> Dict:
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(lowercase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase , __lowerCAmelCase = int(key_split[2] ), int(key_split[4] )
__lowerCAmelCase = config.vision_config.hidden_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[dim : dim * 2]
__lowerCAmelCase = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase = int(key_split[3] )
__lowerCAmelCase = config.text_config.hidden_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[
dim : dim * 2, :
]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[dim : dim * 2]
__lowerCAmelCase = val[-dim:]
else:
__lowerCAmelCase = rename_key(lowercase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
__lowerCAmelCase = val.squeeze_()
else:
__lowerCAmelCase = val
return orig_state_dict
def _lowerCAmelCase ( ) -> str:
__lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCAmelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
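# End-to-end conversion: load the original checkpoint, remap its weights into a GroupViT model,
# sanity-check the image-text logits on a COCO image, then save and optionally push to the Hub.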
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase , lowercase="groupvit-gcc-yfcc" , lowercase=False ) -> List[Any]:
__lowerCAmelCase = GroupViTConfig()
__lowerCAmelCase = GroupViTModel(lowercase ).eval()
__lowerCAmelCase = torch.load(lowercase , map_location="""cpu""" )["""model"""]
__lowerCAmelCase = convert_state_dict(lowercase , lowercase )
__lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowercase , strict=lowercase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase ) == 0)
# verify result
__lowerCAmelCase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowercase , padding=lowercase , return_tensors="""pt""" )
with torch.no_grad():
__lowerCAmelCase = model(**lowercase )
if model_name == "groupvit-gcc-yfcc":
__lowerCAmelCase = torch.tensor([[13.35_23, 6.36_29]] )
elif model_name == "groupvit-gcc-redcaps":
__lowerCAmelCase = torch.tensor([[16.18_73, 8.62_30]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , lowercase , atol=1e-3 )
processor.save_pretrained(lowercase )
model.save_pretrained(lowercase )
print("""Successfully saved processor and model to""" , lowercase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowercase , organization="""nielsr""" )
model.push_to_hub(lowercase , organization="""nielsr""" )
if __name__ == "__main__":
_a : int = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_a : List[str] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 689 | 1 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE=0.01,__SCREAMING_SNAKE_CASE=10_00 ):
'''simple docstring'''
__lowerCAmelCase = p_stop
__lowerCAmelCase = max_length
def __iter__( self ):
'''simple docstring'''
__lowerCAmelCase = 0
__lowerCAmelCase = False
while not stop and count < self.max_length:
yield count
count += 1
__lowerCAmelCase = random.random() < self.p_stop
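# Tests for BatchSamplerShard, IterableDatasetShard and the skip/end-of-dataloader utilities,
# simulating 2 processes throughout.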
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
__lowerCAmelCase = [
BatchSamplerShard(__SCREAMING_SNAKE_CASE,2,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
for i in range(2 )
]
__lowerCAmelCase = [list(__SCREAMING_SNAKE_CASE ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(__SCREAMING_SNAKE_CASE ) for shard in batch_sampler_shards],[len(__SCREAMING_SNAKE_CASE ) for e in expected] )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = BatchSampler(range(24 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(24 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowerCAmelCase = BatchSampler(range(21 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(21 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__lowerCAmelCase = BatchSampler(range(22 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(22 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
__lowerCAmelCase = BatchSampler(range(20 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(20 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
__lowerCAmelCase = BatchSampler(range(2 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(2 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [[], []]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = BatchSampler(range(24 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(24 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size.
__lowerCAmelCase = BatchSampler(range(22 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(22 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowerCAmelCase = BatchSampler(range(21 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(21 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
__lowerCAmelCase = BatchSampler(range(2 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(2 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [[], []]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = BatchSampler(range(24 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(24 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowerCAmelCase = BatchSampler(range(21 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(21 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__lowerCAmelCase = BatchSampler(range(22 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(22 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
__lowerCAmelCase = BatchSampler(range(20 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(20 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
__lowerCAmelCase = BatchSampler(range(2 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(2 ),batch_size=3,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [[], []]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = BatchSampler(range(24 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(24 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size.
__lowerCAmelCase = BatchSampler(range(22 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(22 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowerCAmelCase = BatchSampler(range(21 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(21 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
__lowerCAmelCase = BatchSampler(range(2 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = BatchSampler(range(2 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [[], []]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__lowerCAmelCase = [BatchSamplerShard(__SCREAMING_SNAKE_CASE,2,__SCREAMING_SNAKE_CASE,even_batches=__SCREAMING_SNAKE_CASE ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ),3 )
self.assertEqual(len(batch_sampler_shards[1] ),2 )
self.assertListEqual(list(batch_sampler_shards[0] ),[[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ),[[3, 4], [9, 10, 11]] )
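    # Helper: shard an iterable dataset across `num_processes`, then check that every shard has the same
    # length (a multiple of the per-shard batch size) and that interleaving the shards reproduces the stream.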
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
random.seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = list(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = [
IterableDatasetShard(
__SCREAMING_SNAKE_CASE,batch_size=__SCREAMING_SNAKE_CASE,drop_last=__SCREAMING_SNAKE_CASE,num_processes=__SCREAMING_SNAKE_CASE,process_index=__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE,)
for i in range(__SCREAMING_SNAKE_CASE )
]
__lowerCAmelCase = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__SCREAMING_SNAKE_CASE )
iterable_dataset_lists.append(list(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
__lowerCAmelCase = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),len(__SCREAMING_SNAKE_CASE ) )
self.assertTrue(len(__SCREAMING_SNAKE_CASE ) % shard_batch_size == 0 )
__lowerCAmelCase = []
for idx in range(0,len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__SCREAMING_SNAKE_CASE ) < len(__SCREAMING_SNAKE_CASE ):
reference += reference
self.assertListEqual(__SCREAMING_SNAKE_CASE,reference[: len(__SCREAMING_SNAKE_CASE )] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = 42
__lowerCAmelCase = RandomIterableDataset()
self.check_iterable_dataset_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,batch_size=4,drop_last=__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,batch_size=4,drop_last=__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,batch_size=4,drop_last=__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,batch_size=4,drop_last=__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE )
# Edge case with a very small dataset
__lowerCAmelCase = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,batch_size=4,drop_last=__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,batch_size=4,drop_last=__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,batch_size=4,drop_last=__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE )
self.check_iterable_dataset_shards(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,batch_size=4,drop_last=__SCREAMING_SNAKE_CASE,split_batches=__SCREAMING_SNAKE_CASE )
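    # SkipBatchSampler, SkipDataLoader and skip_first_batches should all resume iteration after skipping
    # the first two batches.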
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = BatchSampler(range(16 ),batch_size=4,drop_last=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = SkipBatchSampler(__SCREAMING_SNAKE_CASE,2 )
self.assertListEqual(list(__SCREAMING_SNAKE_CASE ),[[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = SkipDataLoader(list(range(16 ) ),batch_size=4,skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader],[[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = DataLoader(list(range(16 ) ),batch_size=4 )
__lowerCAmelCase = skip_first_batches(__SCREAMING_SNAKE_CASE,num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader],[[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = DataLoaderShard(list(range(16 ) ),batch_size=4 )
for idx, _ in enumerate(__SCREAMING_SNAKE_CASE ):
self.assertEqual(dataloader.end_of_dataloader,idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__SCREAMING_SNAKE_CASE ):
self.assertEqual(dataloader.end_of_dataloader,idx == 3 )
def lowerCamelCase__ ( self ):
'''simple docstring'''
Accelerator()
__lowerCAmelCase = DataLoaderDispatcher(range(16 ),batch_size=4 )
for idx, _ in enumerate(__SCREAMING_SNAKE_CASE ):
self.assertEqual(dataloader.end_of_dataloader,idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__SCREAMING_SNAKE_CASE ):
self.assertEqual(dataloader.end_of_dataloader,idx == 3 )
| 689 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_a : Tuple = logging.get_logger(__name__)
_a : Optional[int] = ["""model.decoder.embed_positions.weights"""]
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
if "emb" in name:
__lowerCAmelCase = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
__lowerCAmelCase = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
__lowerCAmelCase = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
__lowerCAmelCase = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
__lowerCAmelCase = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
__lowerCAmelCase = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
__lowerCAmelCase = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
__lowerCAmelCase = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
__lowerCAmelCase = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
__lowerCAmelCase = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
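# Rename every key, splitting the fused in_proj qkv weights into q/k/v and separating out the
# encoder-decoder projection into its own state dict.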
def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple[Dict, Dict]:
__lowerCAmelCase = list(state_dict.keys() )
__lowerCAmelCase = {}
for key in keys:
__lowerCAmelCase = state_dict.pop(lowercase )
__lowerCAmelCase = rename_keys(lowercase )
if "in_proj_weight" in key:
# split fused qkv proj
__lowerCAmelCase = val[:hidden_size, :]
__lowerCAmelCase = val[hidden_size : 2 * hidden_size, :]
__lowerCAmelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__lowerCAmelCase = val
else:
__lowerCAmelCase = val
return state_dict, enc_dec_proj_state_dict
def _lowerCAmelCase ( lowercase ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
__lowerCAmelCase = 1024
__lowerCAmelCase = 24
__lowerCAmelCase = 16
elif checkpoint == "medium":
__lowerCAmelCase = 1536
__lowerCAmelCase = 48
__lowerCAmelCase = 24
elif checkpoint == "large":
__lowerCAmelCase = 2048
__lowerCAmelCase = 48
__lowerCAmelCase = 32
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
__lowerCAmelCase = MusicgenDecoderConfig(
hidden_size=lowercase , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase , num_attention_heads=lowercase , )
return config
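# Full conversion: build the composite text-encoder / audio-encoder / decoder model, load the remapped
# decoder weights, run a quick logits shape check, then attach a processor and generation defaults.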
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase=None , lowercase=None , lowercase="cpu" ) -> Optional[Any]:
__lowerCAmelCase = MusicGen.get_pretrained(lowercase , device=lowercase )
__lowerCAmelCase = decoder_config_from_checkpoint(lowercase )
__lowerCAmelCase = fairseq_model.lm.state_dict()
__lowerCAmelCase , __lowerCAmelCase = rename_state_dict(
lowercase , hidden_size=decoder_config.hidden_size )
__lowerCAmelCase = TaEncoderModel.from_pretrained("""t5-base""" )
__lowerCAmelCase = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
__lowerCAmelCase = MusicgenForCausalLM(lowercase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__lowerCAmelCase , __lowerCAmelCase = decoder.load_state_dict(lowercase , strict=lowercase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase )
if len(lowercase ) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
if len(lowercase ) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
__lowerCAmelCase = MusicgenForConditionalGeneration(text_encoder=lowercase , audio_encoder=lowercase , decoder=lowercase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase )
# check we can do a forward pass
__lowerCAmelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__lowerCAmelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__lowerCAmelCase = model(input_ids=lowercase , decoder_input_ids=lowercase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
__lowerCAmelCase = AutoTokenizer.from_pretrained("""t5-base""" )
__lowerCAmelCase = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
__lowerCAmelCase = MusicgenProcessor(feature_extractor=lowercase , tokenizer=lowercase )
# set the appropriate bos/pad token ids
__lowerCAmelCase = 2048
__lowerCAmelCase = 2048
# set other default generation config params
__lowerCAmelCase = int(30 * audio_encoder.config.frame_rate )
__lowerCAmelCase = True
__lowerCAmelCase = 3.0
if pytorch_dump_folder is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(lowercase )
processor.save_pretrained(lowercase )
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(lowercase )
processor.push_to_hub(lowercase )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
_a : List[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 689 | 1 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def _lowerCAmelCase ( lowercase ) -> str:
if not sentence:
return ""
__lowerCAmelCase = dict(zip(lowercase , lowercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 689 |
'''simple docstring'''
from collections import deque
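# Tarjan's algorithm: compute the strongly connected components of a directed graph
# given as an adjacency list, in O(V + E).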
def _lowerCAmelCase ( lowercase ) -> Dict:
__lowerCAmelCase = len(lowercase )
__lowerCAmelCase = deque()
__lowerCAmelCase = [False for _ in range(lowercase )]
__lowerCAmelCase = [-1 for _ in range(lowercase )]
__lowerCAmelCase = index_of[:]
def strong_connect(lowercase , lowercase , lowercase ):
__lowerCAmelCase = index # the number when this node is seen
__lowerCAmelCase = index # lowest rank node reachable from here
index += 1
stack.append(lowercase )
__lowerCAmelCase = True
for w in g[v]:
if index_of[w] == -1:
__lowerCAmelCase = strong_connect(lowercase , lowercase , lowercase )
__lowerCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__lowerCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
__lowerCAmelCase = []
__lowerCAmelCase = stack.pop()
__lowerCAmelCase = False
component.append(lowercase )
while w != v:
__lowerCAmelCase = stack.pop()
__lowerCAmelCase = False
component.append(lowercase )
components.append(lowercase )
return index
__lowerCAmelCase = []
for v in range(lowercase ):
if index_of[v] == -1:
strong_connect(lowercase , 0 , lowercase )
return components
def _lowerCAmelCase ( lowercase , lowercase ) -> str:
__lowerCAmelCase = [[] for _ in range(lowercase )]
for u, v in edges:
g[u].append(lowercase )
return g
if __name__ == "__main__":
# Test
_a : Any = 7
_a : Tuple = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_a : Optional[int] = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_a : Optional[Any] = [(u, v) for u, v in zip(source, target)]
_a : Optional[int] = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 689 | 1 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[Any] ="""Wav2Vec2FeatureExtractor"""
a : int ="""AutoTokenizer"""
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.feature_extractor
__lowerCAmelCase = False
@classmethod
def lowerCamelCase__ ( cls,__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
try:
return super().from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
except OSError:
warnings.warn(
f'Loading a tokenizer inside {cls.__name__} from a config that does not'
""" include a `tokenizer_class` attribute is deprecated and will be """
"""removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
""" attribute to either your `config.json` or `tokenizer_config.json` """
"""file to suppress this warning: """,__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = WavaVecaCTCTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
return cls(feature_extractor=__SCREAMING_SNAKE_CASE,tokenizer=__SCREAMING_SNAKE_CASE )
def __call__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
__lowerCAmelCase = kwargs.pop("""raw_speech""" )
else:
__lowerCAmelCase = kwargs.pop("""audio""",__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = kwargs.pop("""sampling_rate""",__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = kwargs.pop("""text""",__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
__lowerCAmelCase = args[0]
__lowerCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
__lowerCAmelCase = self.feature_extractor(__SCREAMING_SNAKE_CASE,*__SCREAMING_SNAKE_CASE,sampling_rate=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
if text is not None:
__lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__lowerCAmelCase = encodings["""input_ids"""]
return inputs
def lowerCamelCase__ ( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor.pad(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = kwargs.pop("""input_features""",__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = kwargs.pop("""labels""",__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
__lowerCAmelCase = args[0]
__lowerCAmelCase = args[1:]
if input_features is not None:
__lowerCAmelCase = self.feature_extractor.pad(__SCREAMING_SNAKE_CASE,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
if labels is not None:
__lowerCAmelCase = self.tokenizer.pad(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
__lowerCAmelCase = labels["""input_ids"""]
return input_features
def lowerCamelCase__ ( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
@contextmanager
def lowerCamelCase__ ( self ):
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
__lowerCAmelCase = True
__lowerCAmelCase = self.tokenizer
yield
__lowerCAmelCase = self.feature_extractor
__lowerCAmelCase = False
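# Usage sketch (comment only; assumes the reference Wav2Vec2 processor behaviour and an
# illustrative checkpoint name):
#   processor = _UpperCAmelCase.from_pretrained("facebook/wav2vec2-base-960h")
#   batch = processor(audio=waveform, sampling_rate=16_000, text="HELLO WORLD")
# The feature extractor produces the audio features and the tokenizer produces the label ids;
# in the reference behaviour both are returned in one batch when `audio` and `text` are passed.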
| 689 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _lowerCAmelCase ( ) -> Union[str, Any]:
__lowerCAmelCase = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
__lowerCAmelCase = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(lowercase )
# Let's go
__lowerCAmelCase = parser.parse_args()
if not hasattr(lowercase , """func""" ):
parser.print_help()
exit(1 )
# Run
__lowerCAmelCase = args.func(lowercase )
service.run()
if __name__ == "__main__":
main()
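# Invocation sketch: with only EnvironmentCommand registered, the one supported call is
#   diffusers-cli env
# which prints platform and library version information, typically for bug reports.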
| 689 | 1 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Optional[int] = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
_a : int = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
_a : Dict = {
"""ctrl""": 2_5_6,
}
_a : Optional[Any] = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def _lowerCAmelCase ( lowercase ) -> int:
__lowerCAmelCase = set()
__lowerCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowerCAmelCase = char
__lowerCAmelCase = set(lowercase )
return pairs
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Any =VOCAB_FILES_NAMES
a : Any =PRETRAINED_VOCAB_FILES_MAP
a : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : int =CONTROL_CODES
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE="<unk>",**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(unk_token=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE,encoding="""utf-8""" ) as vocab_handle:
__lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {v: k for k, v in self.encoder.items()}
with open(__SCREAMING_SNAKE_CASE,encoding="""utf-8""" ) as merges_handle:
__lowerCAmelCase = merges_handle.read().split("""\n""" )[1:-1]
__lowerCAmelCase = [tuple(merge.split() ) for merge in merges]
__lowerCAmelCase = dict(zip(__SCREAMING_SNAKE_CASE,range(len(__SCREAMING_SNAKE_CASE ) ) ) )
__lowerCAmelCase = {}
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return len(self.encoder )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return dict(self.encoder,**self.added_tokens_encoder )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowerCAmelCase = tuple(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
__lowerCAmelCase = get_pairs(__SCREAMING_SNAKE_CASE )
if not pairs:
return token
while True:
__lowerCAmelCase = min(__SCREAMING_SNAKE_CASE,key=lambda __SCREAMING_SNAKE_CASE : self.bpe_ranks.get(__SCREAMING_SNAKE_CASE,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCAmelCase , __lowerCAmelCase = bigram
__lowerCAmelCase = []
__lowerCAmelCase = 0
while i < len(__SCREAMING_SNAKE_CASE ):
try:
__lowerCAmelCase = word.index(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCAmelCase = j
if word[i] == first and i < len(__SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCAmelCase = tuple(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = new_word
if len(__SCREAMING_SNAKE_CASE ) == 1:
break
else:
__lowerCAmelCase = get_pairs(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """@@ """.join(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = word[:-4]
__lowerCAmelCase = word
return word
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = re.findall(R"""\S+\n?""",__SCREAMING_SNAKE_CASE )
for token in words:
split_tokens.extend(list(self.bpe(__SCREAMING_SNAKE_CASE ).split(""" """ ) ) )
return split_tokens
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.encoder.get(__SCREAMING_SNAKE_CASE,self.encoder.get(self.unk_token ) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.decoder.get(__SCREAMING_SNAKE_CASE,self.unk_token )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = """ """.join(__SCREAMING_SNAKE_CASE ).replace("""@@ ""","""""" ).strip()
return out_string
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__SCREAMING_SNAKE_CASE,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder,indent=2,sort_keys=__SCREAMING_SNAKE_CASE,ensure_ascii=__SCREAMING_SNAKE_CASE ) + """\n""" )
__lowerCAmelCase = 0
with open(__SCREAMING_SNAKE_CASE,"""w""",encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(),key=lambda __SCREAMING_SNAKE_CASE : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
__lowerCAmelCase = token_index
writer.write(""" """.join(__SCREAMING_SNAKE_CASE ) + """\n""" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
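# Standalone illustration (not used by the class above): the symbol-pair extraction that
# drives the BPE merge loop. For the word "hello" the candidate merge pairs are:
word = tuple("hello")
pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
# -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}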
| 689 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)
_a : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
_a : Any = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
for attribute in key.split(""".""" ):
__lowerCAmelCase = getattr(lowercase , lowercase )
if weight_type is not None:
__lowerCAmelCase = getattr(lowercase , lowercase ).shape
else:
__lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
__lowerCAmelCase = value
elif weight_type == "weight_g":
__lowerCAmelCase = value
elif weight_type == "weight_v":
__lowerCAmelCase = value
elif weight_type == "bias":
__lowerCAmelCase = value
else:
__lowerCAmelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]:
__lowerCAmelCase = []
__lowerCAmelCase = fairseq_model.state_dict()
__lowerCAmelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCAmelCase = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key):
# special case since naming is very similar
continue
__lowerCAmelCase = True
if "*" in mapped_key:
__lowerCAmelCase = name.split(lowercase )[0].split(""".""" )[-2]
__lowerCAmelCase = mapped_key.replace("""*""" , lowercase )
if "weight_g" in name:
__lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
__lowerCAmelCase = """weight_v"""
elif "bias" in name:
__lowerCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase = """weight"""
else:
__lowerCAmelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'Unused weights: {unused_weights}' )
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
__lowerCAmelCase = name.split(""".""" )
__lowerCAmelCase = int(items[0] )
__lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' )
__lowerCAmelCase = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' )
__lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _lowerCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Dict:
if config_path is not None:
__lowerCAmelCase = UniSpeechSatConfig.from_pretrained(lowercase )
else:
__lowerCAmelCase = UniSpeechSatConfig()
__lowerCAmelCase = """"""
if is_finetuned:
__lowerCAmelCase = UniSpeechSatForCTC(lowercase )
else:
__lowerCAmelCase = UniSpeechSatForPreTraining(lowercase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
__lowerCAmelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_a : List[str] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_a : Union[str, Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
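# Invocation sketch (hypothetical paths; the flags are the ones defined in the argparse block above):
#   python convert_unispeech_sat.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --not_finetuned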
| 689 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(),encoding="""utf-8""",check=__SCREAMING_SNAKE_CASE,)
assert hasattr(self,"""env""" )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
__lowerCAmelCase = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script,source_dir=self.env.test_path,role=self.env.role,image_uri=self.env.image_uri,base_job_name=__SCREAMING_SNAKE_CASE,instance_count=__SCREAMING_SNAKE_CASE,instance_type=self.instance_type,debugger_hook_config=__SCREAMING_SNAKE_CASE,hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path},metric_definitions=self.env.metric_definitions,distribution=__SCREAMING_SNAKE_CASE,py_version="""py36""",)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
TrainingJobAnalytics(__SCREAMING_SNAKE_CASE ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.create_estimator(__SCREAMING_SNAKE_CASE )
# run training
estimator.fit()
# result dataframe
__lowerCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
__lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""",99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json',"""w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss},__SCREAMING_SNAKE_CASE )
| 689 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_a : str = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_a : Dict = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_a : List[str] = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ),reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = spearmanr(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 689 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_a : Optional[int] = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
a : Optional[int] =MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a : Optional[int] =TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a : str ={config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a : Tuple ={
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = pipeline(
task="""text-classification""",model="""hf-internal-testing/tiny-random-distilbert""",framework="""pt""" )
__lowerCAmelCase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ),[{"""label""": """LABEL_0""", """score""": 0.504}] )
__lowerCAmelCase = text_classifier("""This is great !""",top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ),[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
__lowerCAmelCase = text_classifier(["""This is great !""", """This is bad"""],top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ),[
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
],)
__lowerCAmelCase = text_classifier("""This is great !""",top_k=1 )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ),[{"""label""": """LABEL_0""", """score""": 0.504}] )
# Legacy behavior
__lowerCAmelCase = text_classifier("""This is great !""",return_all_scores=__SCREAMING_SNAKE_CASE )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ),[{"""label""": """LABEL_0""", """score""": 0.504}] )
__lowerCAmelCase = text_classifier("""This is great !""",return_all_scores=__SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ),[[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
__lowerCAmelCase = text_classifier(["""This is great !""", """Something else"""],return_all_scores=__SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ),[
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
],)
__lowerCAmelCase = text_classifier(["""This is great !""", """Something else"""],return_all_scores=__SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ),[
{"""label""": """LABEL_0""", """score""": 0.504},
{"""label""": """LABEL_0""", """score""": 0.504},
],)
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
import torch
__lowerCAmelCase = pipeline(
task="""text-classification""",model="""hf-internal-testing/tiny-random-distilbert""",framework="""pt""",device=torch.device("""cpu""" ),)
__lowerCAmelCase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ),[{"""label""": """LABEL_0""", """score""": 0.504}] )
@require_tf
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = pipeline(
task="""text-classification""",model="""hf-internal-testing/tiny-random-distilbert""",framework="""tf""" )
__lowerCAmelCase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ),[{"""label""": """LABEL_0""", """score""": 0.504}] )
@slow
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = pipeline("""text-classification""" )
__lowerCAmelCase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ),[{"""label""": """POSITIVE""", """score""": 1.0}] )
__lowerCAmelCase = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ),[{"""label""": """NEGATIVE""", """score""": 1.0}] )
__lowerCAmelCase = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ),[{"""label""": """POSITIVE""", """score""": 0.988}] )
@slow
@require_tf
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = pipeline("""text-classification""",framework="""tf""" )
__lowerCAmelCase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ),[{"""label""": """POSITIVE""", """score""": 1.0}] )
__lowerCAmelCase = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ),[{"""label""": """NEGATIVE""", """score""": 1.0}] )
__lowerCAmelCase = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ),[{"""label""": """POSITIVE""", """score""": 0.988}] )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = TextClassificationPipeline(model=__SCREAMING_SNAKE_CASE,tokenizer=__SCREAMING_SNAKE_CASE )
return text_classifier, ["HuggingFace is in", "This is another test"]
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
__lowerCAmelCase = """HuggingFace is in"""
__lowerCAmelCase = text_classifier(__SCREAMING_SNAKE_CASE )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ),[{"""label""": ANY(__SCREAMING_SNAKE_CASE ), """score""": ANY(__SCREAMING_SNAKE_CASE )}] )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
__lowerCAmelCase = ["""HuggingFace is in """, """Paris is in France"""]
__lowerCAmelCase = text_classifier(__SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ),[{"""label""": ANY(__SCREAMING_SNAKE_CASE ), """score""": ANY(__SCREAMING_SNAKE_CASE )}, {"""label""": ANY(__SCREAMING_SNAKE_CASE ), """score""": ANY(__SCREAMING_SNAKE_CASE )}],)
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
__lowerCAmelCase = text_classifier(__SCREAMING_SNAKE_CASE,top_k=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ),[[{"""label""": ANY(__SCREAMING_SNAKE_CASE ), """score""": ANY(__SCREAMING_SNAKE_CASE )}] * N, [{"""label""": ANY(__SCREAMING_SNAKE_CASE ), """score""": ANY(__SCREAMING_SNAKE_CASE )}] * N],)
__lowerCAmelCase = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
__lowerCAmelCase = text_classifier(__SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ),{"""label""": ANY(__SCREAMING_SNAKE_CASE ), """score""": ANY(__SCREAMING_SNAKE_CASE )},)
self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
__lowerCAmelCase = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
text_classifier(__SCREAMING_SNAKE_CASE )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
__lowerCAmelCase = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ),[{"""label""": ANY(__SCREAMING_SNAKE_CASE ), """score""": ANY(__SCREAMING_SNAKE_CASE )}],)
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
| 689 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=lowerCAmelCase_ ):
a : List[str] =["""onnx"""]
def __init__( self,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self,["""onnx"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""onnx"""] )
@classmethod
def lowerCamelCase__ ( cls,*__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls,["""onnx"""] )
| 689 | 1 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Union[str, Any] = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[str] ="""segformer"""
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=4,__SCREAMING_SNAKE_CASE=[2, 2, 2, 2],__SCREAMING_SNAKE_CASE=[8, 4, 2, 1],__SCREAMING_SNAKE_CASE=[32, 64, 1_60, 2_56],__SCREAMING_SNAKE_CASE=[7, 3, 3, 3],__SCREAMING_SNAKE_CASE=[4, 2, 2, 2],__SCREAMING_SNAKE_CASE=[1, 2, 5, 8],__SCREAMING_SNAKE_CASE=[4, 4, 4, 4],__SCREAMING_SNAKE_CASE="gelu",__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=1e-6,__SCREAMING_SNAKE_CASE=2_56,__SCREAMING_SNAKE_CASE=2_55,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""",__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = num_channels
__lowerCAmelCase = num_encoder_blocks
__lowerCAmelCase = depths
__lowerCAmelCase = sr_ratios
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = patch_sizes
__lowerCAmelCase = strides
__lowerCAmelCase = mlp_ratios
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = classifier_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = decoder_hidden_size
__lowerCAmelCase = kwargs.get("""reshape_last_stage""",__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = semantic_loss_ignore_index
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[Any] =version.parse("""1.11""" )
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 1e-4
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 12
| 689 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : Optional[int] = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[str] ="""gptj"""
a : Optional[int] ={
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self,__SCREAMING_SNAKE_CASE=5_04_00,__SCREAMING_SNAKE_CASE=20_48,__SCREAMING_SNAKE_CASE=40_96,__SCREAMING_SNAKE_CASE=28,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="gelu_new",__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=False,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = vocab_size
__lowerCAmelCase = n_positions
__lowerCAmelCase = n_embd
__lowerCAmelCase = n_layer
__lowerCAmelCase = n_head
__lowerCAmelCase = n_inner
__lowerCAmelCase = rotary_dim
__lowerCAmelCase = activation_function
__lowerCAmelCase = resid_pdrop
__lowerCAmelCase = embd_pdrop
__lowerCAmelCase = attn_pdrop
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_range
__lowerCAmelCase = use_cache
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
super().__init__(
bos_token_id=__SCREAMING_SNAKE_CASE,eos_token_id=__SCREAMING_SNAKE_CASE,tie_word_embeddings=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = "default",__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE,task=__SCREAMING_SNAKE_CASE,patching_specs=__SCREAMING_SNAKE_CASE,use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config,"""pad_token_id""",__SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
__lowerCAmelCase = 0
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE,direction="""inputs""" )
__lowerCAmelCase = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._config.n_layer
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self._config.n_head
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = -1,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,):
'''simple docstring'''
__lowerCAmelCase = super(__SCREAMING_SNAKE_CASE,self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE,batch_size=__SCREAMING_SNAKE_CASE,seq_length=__SCREAMING_SNAKE_CASE,is_pair=__SCREAMING_SNAKE_CASE,framework=__SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
__lowerCAmelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCAmelCase = seqlen + 2
__lowerCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowerCAmelCase = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
__lowerCAmelCase = common_inputs["""attention_mask"""]
if self.use_past:
__lowerCAmelCase = ordered_inputs["""attention_mask"""].dtype
__lowerCAmelCase = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,dtype=__SCREAMING_SNAKE_CASE )],dim=1 )
return ordered_inputs
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 13
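# Shape sketch (hypothetical batch/seq values): for batch=2 and seqlen=5 the dummy
# past_key_values built above are n_layer pairs of zero tensors with shape
# (batch, num_attention_heads, seqlen + 2, hidden_size // num_attention_heads),
# i.e. (2, 16, 7, 256) with the GPT-J defaults n_head=16 and n_embd=4096.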
| 689 | 1 |
'''simple docstring'''
_a : List[str] = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def _lowerCAmelCase ( ) -> None:
__lowerCAmelCase = input("""Enter message: """ )
__lowerCAmelCase = input("""Enter key [alphanumeric]: """ )
__lowerCAmelCase = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
__lowerCAmelCase = """encrypt"""
__lowerCAmelCase = encrypt_message(lowercase , lowercase )
elif mode.lower().startswith("""d""" ):
__lowerCAmelCase = """decrypt"""
__lowerCAmelCase = decrypt_message(lowercase , lowercase )
print(f'\n{mode.title()}ed message:' )
print(lowercase )
def _lowerCAmelCase ( lowercase , lowercase ) -> str:
return translate_message(lowercase , lowercase , """encrypt""" )
def _lowerCAmelCase ( lowercase , lowercase ) -> str:
return translate_message(lowercase , lowercase , """decrypt""" )
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> str:
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = key.upper()
for symbol in message:
__lowerCAmelCase = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(lowercase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(lowercase ):
__lowerCAmelCase = 0
else:
translated.append(lowercase )
return "".join(lowercase )
if __name__ == "__main__":
main()
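# Worked example (comment-only sketch, classic Vigenère test vector): encrypting
# "ATTACKATDAWN" under the key "LEMON" gives "LXFOPVEFRNHR", and decrypting the
# ciphertext with the same key restores the plaintext.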
| 689 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase = 5000_0000 ) -> int:
__lowerCAmelCase = set()
__lowerCAmelCase = int((limit - 24) ** (1 / 2) )
__lowerCAmelCase = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , lowercase ) ) )
for primea in primes:
__lowerCAmelCase = primea * primea
for primea in primes:
__lowerCAmelCase = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
__lowerCAmelCase = primea * primea * primea * primea
__lowerCAmelCase = square + cube + tetr
if total >= limit:
break
ret.add(lowercase )
return len(lowercase )
if __name__ == "__main__":
print(f'{solution() = }')
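# Sanity sketch: the smallest prime-power triple is 28 = 2**2 + 2**3 + 2**4, which is why
# the sieve above only needs primes up to sqrt(limit - 24).
assert 2**2 + 2**3 + 2**4 == 28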
| 689 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_a : Tuple = """\
"""
_a : Tuple = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_a : Optional[Any] = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ),reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 16,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__lowerCAmelCase = """cuda"""
else:
__lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__SCREAMING_SNAKE_CASE ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__lowerCAmelCase = model.config.max_length - 1
else:
__lowerCAmelCase = model.config.max_length
__lowerCAmelCase = tokenizer(
__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,max_length=__SCREAMING_SNAKE_CASE,return_tensors="""pt""",return_attention_mask=__SCREAMING_SNAKE_CASE,).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = encodings["""input_ids"""]
__lowerCAmelCase = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ),1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ),2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__lowerCAmelCase = []
__lowerCAmelCase = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0,len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase = min(start_index + batch_size,len(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = encoded_texts[start_index:end_index]
__lowerCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
__lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch],dim=1 )
__lowerCAmelCase = torch.cat(
[torch.ones(bos_tokens_tensor.size(),dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask],dim=1 )
__lowerCAmelCase = encoded_batch
with torch.no_grad():
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE ).logits
__lowerCAmelCase = out_logits[..., :-1, :].contiguous()
__lowerCAmelCase = labels[..., 1:].contiguous()
__lowerCAmelCase = attn_mask[..., 1:].contiguous()
__lowerCAmelCase = torch.expa(
(loss_fct(shift_logits.transpose(1,2 ),__SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
| 689 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : Optional[int] =TextToVideoSDPipeline
a : Optional[int] =TEXT_TO_IMAGE_PARAMS
a : Any =TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
a : Union[str, Any] =frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""),up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""),cross_attention_dim=32,attention_head_dim=4,)
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,)
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,sample_size=1_28,)
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,hidden_act="""gelu""",projection_dim=5_12,)
__lowerCAmelCase = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = TextToVideoSDPipeline(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """np"""
__lowerCAmelCase = sd_pipe(**__SCREAMING_SNAKE_CASE ).frames
__lowerCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
__lowerCAmelCase = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",)
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
__lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowerCAmelCase = pipe.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=25,output_type="""pt""" ).frames
__lowerCAmelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
__lowerCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__lowerCAmelCase = pipe.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=2,output_type="""pt""" ).frames
__lowerCAmelCase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 689 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
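# Smoke tests for DanceDiffusionPipeline with a tiny 1D UNet and IPNDM scheduler; the slow class below runs the harmonai/maestro-150k checkpoint on GPU.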
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : List[str] =DanceDiffusionPipeline
a : Dict =UNCONDITIONAL_AUDIO_GENERATION_PARAMS
a : List[str] =PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
a : Any =UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
a : Any =False
a : str =False
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDModel(
block_out_channels=(32, 32, 64),extra_in_channels=16,sample_size=5_12,sample_rate=1_60_00,in_channels=2,out_channels=2,flip_sin_to_cos=__SCREAMING_SNAKE_CASE,use_timestep_embedding=__SCREAMING_SNAKE_CASE,time_embedding_type="""fourier""",mid_block_type="""UNetMidBlock1D""",down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D"""),up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip"""),)
__lowerCAmelCase = IPNDMScheduler()
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = DanceDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = output.audios
__lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
__lowerCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = torch_device
__lowerCAmelCase = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(generator=__SCREAMING_SNAKE_CASE,num_inference_steps=1_00,audio_length_in_s=4.096 )
__lowerCAmelCase = output.audios
__lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__lowerCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = torch_device
__lowerCAmelCase = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""",torch_dtype=torch.floataa )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(generator=__SCREAMING_SNAKE_CASE,num_inference_steps=1_00,audio_length_in_s=4.096 )
__lowerCAmelCase = output.audios
__lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__lowerCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 689 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
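# Decorator: with accelerate >= 0.17.0 installed, call the module's accelerate pre-forward hook (self._hf_hook.pre_forward) before running the wrapped method; otherwise return the method unchanged.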
def _lowerCAmelCase ( lowercase ) -> Optional[int]:
if not is_accelerate_available():
return method
__lowerCAmelCase = version.parse(accelerate.__version__ ).base_version
if version.parse(lowercase ) < version.parse("""0.17.0""" ):
return method
def wrapper(self , *lowercase , **lowercase ):
if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
self._hf_hook.pre_forward(self )
return method(self , *lowercase , **lowercase )
return wrapper
| 689 | 1 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a : Union[str, Any] = logging.get_logger(__name__)
_a : int = {"""vocab_file""": """vocab.txt"""}
_a : List[str] = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
_a : Optional[int] = {
"""openbmb/cpm-ant-10b""": 1_0_2_4,
}
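# Read a plain-text vocabulary file (one token per line) into an ordered token -> index mapping.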
def _lowerCAmelCase ( lowercase ) -> List[str]:
__lowerCAmelCase = collections.OrderedDict()
with open(lowercase , """r""" , encoding="""utf-8""" ) as reader:
__lowerCAmelCase = reader.readlines()
for index, token in enumerate(lowercase ):
__lowerCAmelCase = token.rstrip("""\n""" )
__lowerCAmelCase = index
return vocab
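# Greedy longest-match-first WordPiece tokenizer; over-long or unmatched input maps to the unk token.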
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE="<unk>",__SCREAMING_SNAKE_CASE=2_00 ):
'''simple docstring'''
__lowerCAmelCase = vocab
__lowerCAmelCase = unk_token
__lowerCAmelCase = max_input_chars_per_word
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = list(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > self.max_input_chars_per_word:
return [self.unk_token]
__lowerCAmelCase = 0
__lowerCAmelCase = []
while start < len(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = None
while start < end:
__lowerCAmelCase = """""".join(chars[start:end] )
if substr in self.vocab:
__lowerCAmelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = end
return sub_tokens
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Dict =VOCAB_FILES_NAMES
a : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
a : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[Any] =["""input_ids""", """attention_mask"""]
a : Union[str, Any] =False
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE="<d>",__SCREAMING_SNAKE_CASE="</d>",__SCREAMING_SNAKE_CASE="<s>",__SCREAMING_SNAKE_CASE="</s>",__SCREAMING_SNAKE_CASE="<pad>",__SCREAMING_SNAKE_CASE="<unk>",__SCREAMING_SNAKE_CASE="</n>",__SCREAMING_SNAKE_CASE="</_>",__SCREAMING_SNAKE_CASE="left",**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
requires_backends(self,["""jieba"""] )
super().__init__(
bod_token=__SCREAMING_SNAKE_CASE,eod_token=__SCREAMING_SNAKE_CASE,bos_token=__SCREAMING_SNAKE_CASE,eos_token=__SCREAMING_SNAKE_CASE,pad_token=__SCREAMING_SNAKE_CASE,unk_token=__SCREAMING_SNAKE_CASE,line_token=__SCREAMING_SNAKE_CASE,space_token=__SCREAMING_SNAKE_CASE,padding_side=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = bod_token
__lowerCAmelCase = eod_token
__lowerCAmelCase = load_vocab(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.encoder[space_token]
__lowerCAmelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
__lowerCAmelCase = collections.OrderedDict(sorted(self.encoder.items(),key=lambda __SCREAMING_SNAKE_CASE : x[1] ) )
__lowerCAmelCase = {v: k for k, v in self.encoder.items()}
__lowerCAmelCase = WordpieceTokenizer(vocab=self.encoder,unk_token=self.unk_token )
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.encoder["\n"]
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return len(self.encoder )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return dict(self.encoder,**self.added_tokens_encoder )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = []
for x in jieba.cut(__SCREAMING_SNAKE_CASE,cut_all=__SCREAMING_SNAKE_CASE ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) )
return output_tokens
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = [i for i in token_ids if i >= 0]
__lowerCAmelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return token in self.encoder
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return "".join(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.encoder.get(__SCREAMING_SNAKE_CASE,self.encoder.get(self.unk_token ) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.decoder.get(__SCREAMING_SNAKE_CASE,self.unk_token )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if os.path.isdir(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
__lowerCAmelCase = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
__lowerCAmelCase = 0
if " " in self.encoder:
__lowerCAmelCase = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
__lowerCAmelCase = self.encoder["""\n"""]
del self.encoder["\n"]
__lowerCAmelCase = collections.OrderedDict(sorted(self.encoder.items(),key=lambda __SCREAMING_SNAKE_CASE : x[1] ) )
with open(__SCREAMING_SNAKE_CASE,"""w""",encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
""" Please check that the vocabulary is not corrupted!""" )
__lowerCAmelCase = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE,token_ids_a=__SCREAMING_SNAKE_CASE,already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE ))
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE ))
| 689 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
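# Merge LoRA weights from a .safetensors checkpoint directly into the pipeline's text encoder / UNet layers: W <- W + alpha * (lora_up @ lora_down).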
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
# load base model
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained(lowercase , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
__lowerCAmelCase = load_file(lowercase )
__lowerCAmelCase = []
# directly update weight in diffusers model
for key in state_dict:
        # it can help to print the key; it usually looks like
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # the alpha value is supplied separately, so skip .alpha entries and keys that were already merged
if ".alpha" in key or key in visited:
continue
if "text" in key:
__lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
__lowerCAmelCase = pipeline.text_encoder
else:
__lowerCAmelCase = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
__lowerCAmelCase = pipeline.unet
# find the target layer
__lowerCAmelCase = layer_infos.pop(0 )
while len(lowercase ) > -1:
try:
__lowerCAmelCase = curr_layer.__getattr__(lowercase )
if len(lowercase ) > 0:
__lowerCAmelCase = layer_infos.pop(0 )
elif len(lowercase ) == 0:
break
except Exception:
if len(lowercase ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
__lowerCAmelCase = layer_infos.pop(0 )
__lowerCAmelCase = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(lowercase )
else:
pair_keys.append(lowercase )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
__lowerCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
__lowerCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase ).unsqueeze(2 ).unsqueeze(3 )
else:
__lowerCAmelCase = state_dict[pair_keys[0]].to(torch.floataa )
__lowerCAmelCase = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase , lowercase )
# update visited list
for item in pair_keys:
visited.append(lowercase )
return pipeline
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
_a : Optional[int] = parser.parse_args()
_a : Dict = args.base_model_path
_a : Optional[Any] = args.checkpoint_path
_a : Union[str, Any] = args.dump_path
_a : Optional[int] = args.lora_prefix_unet
_a : int = args.lora_prefix_text_encoder
_a : str = args.alpha
_a : Any = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
_a : Tuple = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 689 | 1 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
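# unCLIP-style text projection: combines CLIP image embeddings with text-encoder outputs to produce additive time embeddings and extra context tokens for the decoder.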
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self,*,
__SCREAMING_SNAKE_CASE = 4,__SCREAMING_SNAKE_CASE = 7_68,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = nn.Parameter(torch.zeros(__SCREAMING_SNAKE_CASE ) )
# parameters for additional clip time embeddings
__lowerCAmelCase = nn.Linear(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = nn.Linear(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# parameters for encoder hidden states
__lowerCAmelCase = clip_extra_context_tokens
__lowerCAmelCase = nn.Linear(
__SCREAMING_SNAKE_CASE,self.clip_extra_context_tokens * cross_attention_dim )
__lowerCAmelCase = nn.Linear(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = nn.LayerNorm(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,*, __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
__lowerCAmelCase = image_embeddings.shape[0]
__lowerCAmelCase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
__lowerCAmelCase = classifier_free_guidance_embeddings.expand(
__SCREAMING_SNAKE_CASE,-1 )
__lowerCAmelCase = torch.cat([classifier_free_guidance_embeddings, image_embeddings],dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
__lowerCAmelCase = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
__lowerCAmelCase = self.embedding_proj(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.clip_image_embeddings_project_to_time_embeddings(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
__lowerCAmelCase = self.clip_extra_context_tokens_proj(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = clip_extra_context_tokens.reshape(__SCREAMING_SNAKE_CASE,-1,self.clip_extra_context_tokens )
__lowerCAmelCase = clip_extra_context_tokens.permute(0,2,1 )
__lowerCAmelCase = self.encoder_hidden_states_proj(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.text_encoder_hidden_states_norm(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states],dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 689 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
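# A string can be rearranged into a palindrome iff at most one character occurs an odd number of times; both implementations below check exactly that.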
def _lowerCAmelCase ( lowercase = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def _lowerCAmelCase ( lowercase = "" ) -> bool:
if len(lowercase ) == 0:
return True
__lowerCAmelCase = input_str.replace(""" """ , """""" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__lowerCAmelCase = {}
for character in lower_case_input_str:
__lowerCAmelCase = character_freq_dict.get(lowercase , 0 ) + 1
__lowerCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _lowerCAmelCase ( lowercase = "" ) -> None:
print("""\nFor string = """ , lowercase , """:""" )
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(lowercase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
_a : int = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
_a : Optional[int] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 689 | 1 |