"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
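
# Each entry maps a shorthand model name to its (config class, model class, tokenizer class) triple.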
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """
    A bunch of sanity checks on the arguments before starting the distillation.
    """
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only on the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of passes over the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        # Smooth the token frequencies: the negative exponent upweights rare tokens (word2vec-style).
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
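
# Illustrative invocation (paths are placeholders, not shipped files):
#   python train.py --student_type distilbert --student_config distilbert.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_clm 0.0 \
#       --dump_path serialization_dir/my_distillation \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle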
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Resize so that the shortest edge matches size["shortest_edge"], keeping the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
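
# Usage sketch (assumes this is the CLIP-style image processor as exported by transformers;
# the checkpoint name is illustrative):
#   image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = image_processor(images=image, return_tensors="pt")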
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar `item` is to the target by counting characters in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
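
# For example, evaluate("Helxo Worlx", "Hello World") returns ("Helxo Worlx", 9.0).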
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails to create two children."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """Randomly change one gene of the child with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and generate new children from the pair."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is reproduced exactly."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is the selection step.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
"""simple docstring"""
def solution() -> int:
    """Build the Champernowne constant 0.123456789101112... and return the product of its
    1st, 10th, 100th, 1,000th, 10,000th, 100,000th and 1,000,000th digits."""
    constant = []
    i = 1
    # We append whole numbers, so after 1e6 appends the joined string
    # comfortably exceeds 1e6 digits.
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
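
# This is Project Euler problem 40; the expected product is 210.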
if __name__ == "__main__":
print(solution())
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
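
# Usage sketch: instantiating ConvBertConfig() with the defaults above yields a configuration
# similar to the YituTech/conv-bert-base architecture.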
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape the first Amazon.in results page for `product` and collect title, link,
    price, rating, MRP and discount into a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
except AttributeError:
pass
        data_frame.loc[data_frame.index, :] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Reset the parsed price fields so a failed lookup on the next item
        # does not silently reuse this item's values.
        product_price = " "
        product_mrp = " "
        data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCamelCase : Any = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    """Compute, for each position i, the length of the longest substring starting at i
    that is also a prefix of the string (the Z-array)."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result
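
# For example, z_function("abacaba") returns [0, 0, 1, 0, 3, 0, 1].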
def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check whether the match at position i can be extended by one more character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression given as a list of tokens."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Python's // floors toward negative infinity; adjust so that
                # division truncates toward zero instead.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
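
# For example, evaluate_postfix(["2", "1", "+", "3", "*"]) evaluates (2 + 1) * 3 = 9.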
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left = None
        self.right = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
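
# For example, for a root of value 10 with children 5 and -3,
# next(iter(BinaryTreeNodeSum(root))) returns 12.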
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
SPIECE_UNDERLINE = "▁"
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=30_522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(F"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
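
# Example invocation using the defaults above (the script file name is assumed):
#   python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
#       --token_counts_dump data/token_counts.bert-base-uncased.pickle --vocab_size 30522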
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """Search for a path from init to goal on the grid, guided by the heuristic."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    # Walk backwards from the goal along the recorded actions to recover the path.
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y, x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """Extract the shared-data JSON blob from a script tag and return the user dict."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    """A self-running check; skipped on CI since the live data changes."""
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its state disagrees with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
A = DonutImageProcessingTester(self )
@property
def __a ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Union[str, Any] ):
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , 'do_resize' ) )
self.assertTrue(hasattr(_lowercase , 'size' ) )
self.assertTrue(hasattr(_lowercase , 'do_thumbnail' ) )
self.assertTrue(hasattr(_lowercase , 'do_align_long_axis' ) )
self.assertTrue(hasattr(_lowercase , 'do_pad' ) )
self.assertTrue(hasattr(_lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowercase , 'image_mean' ) )
self.assertTrue(hasattr(_lowercase , 'image_std' ) )
def __a ( self : int ):
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
A = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def __a ( self : Any ):
pass
@is_flaky()
def __a ( self : int ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[str] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[Any] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 690 |
"""simple docstring"""
def __snake_case ( UpperCamelCase__ ) -> list[int]:
"""simple docstring"""
A = [0 for i in range(len(UpperCamelCase__ ) )]
# initialize interval's left pointer and right pointer
A , A = 0, 0
for i in range(1 , len(UpperCamelCase__ ) ):
# case when current index is inside the interval
if i <= right_pointer:
A = min(right_pointer - i + 1 , z_result[i - left_pointer] )
A = min_edge
while go_next(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
A , A = i, i + z_result[i] - 1
return z_result
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> bool:
"""simple docstring"""
return i + z_result[i] < len(UpperCamelCase__ ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
A = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
A = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(UpperCamelCase__ ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with OOM errors on GPU. This makes JAX allocate
    # exactly what is needed on demand and deallocate memory that is no longer
    # needed, at the cost of speed, as stated here:
    # https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ) -> Any:
"""simple docstring"""
if attention_mask is None:
A = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
A = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
A = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A = np.ones((config.decoder_layers, config.decoder_attention_heads) )
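    # NOTE: the head/cross-attention masks above are built for signature parity
    # with the PyTorch helper but are not included in the returned dict.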
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCamelCase__ :
def __init__( self : Optional[Any] , _lowercase : List[str] , _lowercase : Dict=13 , _lowercase : str=7 , _lowercase : List[Any]=True , _lowercase : Optional[int]=False , _lowercase : Optional[int]=99 , _lowercase : str=16 , _lowercase : Optional[Any]=2 , _lowercase : Optional[int]=4 , _lowercase : List[Any]=4 , _lowercase : Optional[Any]="gelu" , _lowercase : Optional[int]=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : List[str]=32 , _lowercase : str=2 , _lowercase : Any=1 , _lowercase : Tuple=0 , _lowercase : List[str]=0.0_2 , ):
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = eos_token_id
A = pad_token_id
A = bos_token_id
A = initializer_range
def __a ( self : Any ):
A = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
A = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
A = shift_tokens_right(_lowercase , 1 , 2 )
A = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowercase , )
A = prepare_blenderbot_inputs_dict(_lowercase , _lowercase , _lowercase )
return config, inputs_dict
def __a ( self : Optional[int] ):
A , A = self.prepare_config_and_inputs()
return config, inputs_dict
def __a ( self : Union[str, Any] , _lowercase : str , _lowercase : Any , _lowercase : Any ):
A = 20
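        # Exercise incremental decoding: run all but the last token through the
        # cached decode path, then feed the final token alone, and compare the
        # logits against a single uncached full-sequence decode.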
A = model_class_name(_lowercase )
A = model.encode(inputs_dict['input_ids'] )
A , A = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
A = model.init_cache(decoder_input_ids.shape[0] , _lowercase , _lowercase )
A = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
A = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
A = model.decode(
decoder_input_ids[:, :-1] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=_lowercase , decoder_position_ids=_lowercase , )
A = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
A = model.decode(
decoder_input_ids[:, -1:] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowercase , )
A = model.decode(_lowercase , _lowercase )
A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' )
def __a ( self : List[str] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Dict ):
A = 20
A = model_class_name(_lowercase )
A = model.encode(inputs_dict['input_ids'] )
A , A = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
A = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
A = model.init_cache(decoder_input_ids.shape[0] , _lowercase , _lowercase )
A = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
A = model.decode(
decoder_input_ids[:, :-1] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=_lowercase , decoder_position_ids=_lowercase , )
A = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
A = model.decode(
decoder_input_ids[:, -1:] , _lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowercase , decoder_position_ids=_lowercase , )
A = model.decode(_lowercase , _lowercase , decoder_attention_mask=_lowercase )
A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
lowerCAmelCase = 99
def __a ( self : Any ):
A = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
A = input_ids.shape[0]
A = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __a ( self : List[Any] ):
A , A , A = self._get_config_and_data()
A = FlaxBlenderbotForConditionalGeneration(_lowercase )
A = lm_model(input_ids=_lowercase )
A = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowercase )
def __a ( self : int ):
A = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
A = FlaxBlenderbotForConditionalGeneration(_lowercase )
A = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
A = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
A = lm_model(input_ids=_lowercase , decoder_input_ids=_lowercase )
A = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowercase )
def __a ( self : Dict ):
A = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
A = shift_tokens_right(_lowercase , 1 , 2 )
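        # shift_tokens_right moves every token one slot to the right, writes the
        # decoder start token (2) at position 0 and drops the last column, so the
        # single trailing pad (1) in this batch disappears.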
A = np.equal(_lowercase , 1 ).astype(np.floataa ).sum()
A = np.equal(_lowercase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowercase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase , UpperCAmelCase_ ):
lowerCAmelCase = True
lowerCAmelCase = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowerCAmelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def __a ( self : str ):
A = FlaxBlenderbotModelTester(self )
def __a ( self : Dict ):
A , A = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowercase , _lowercase , _lowercase )
def __a ( self : Any ):
A , A = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowercase , _lowercase , _lowercase )
def __a ( self : Tuple ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A = self._prepare_for_class(_lowercase , _lowercase )
A = model_class(_lowercase )
@jax.jit
def encode_jitted(_lowercase : Any , _lowercase : Optional[int]=None , **_lowercase : Optional[Any] ):
return model.encode(input_ids=_lowercase , attention_mask=_lowercase )
with self.subTest('JIT Enabled' ):
A = encode_jitted(**_lowercase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
A = encode_jitted(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for jitted_output, output in zip(_lowercase , _lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def __a ( self : str ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A = model_class(_lowercase )
A = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
A = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowercase : List[str] , _lowercase : List[Any] , _lowercase : Dict ):
return model.decode(
decoder_input_ids=_lowercase , decoder_attention_mask=_lowercase , encoder_outputs=_lowercase , )
with self.subTest('JIT Enabled' ):
A = decode_jitted(**_lowercase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
A = decode_jitted(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for jitted_output, output in zip(_lowercase , _lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __a ( self : Dict ):
for model_class_name in self.all_model_classes:
A = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
A = np.ones((1, 1) ) * model.config.eos_token_id
A = model(_lowercase )
self.assertIsNotNone(_lowercase )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def __a ( self : int ):
A = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
A = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
A = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=_lowercase )
A = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
A = ['Sam']
A = tokenizer(_lowercase , return_tensors='jax' )
A = model.generate(**_lowercase , **_lowercase )
A = 'Sam is a great name. It means "sun" in Gaelic.'
A = tokenizer.batch_decode(_lowercase , **_lowercase )
assert generated_txt[0].strip() == tgt_text
| 690 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = LDMTextToImagePipeline
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase = False
def __a ( self : Dict ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
A = CLIPTextModel(_lowercase )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __a ( self : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any]=0 ):
if str(_lowercase ).startswith('mps' ):
A = torch.manual_seed(_lowercase )
else:
A = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Any ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = LDMTextToImagePipeline(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_dummy_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
A = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : int , _lowercase : List[Any] , _lowercase : int=torch.floataa , _lowercase : int=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Union[str, Any] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
A = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
A = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Tuple=torch.floataa , _lowercase : Optional[Any]=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : List[str] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images[0]
A = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
A = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 690 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """naver-clova-ix/donut-base-finetuned-docvqa"""
lowerCAmelCase = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
lowerCAmelCase = """document_qa"""
lowerCAmelCase = AutoProcessor
lowerCAmelCase = VisionEncoderDecoderModel
lowerCAmelCase = ["""image""", """text"""]
lowerCAmelCase = ["""text"""]
def __init__( self : Tuple , *_lowercase : Optional[Any] , **_lowercase : Optional[Any] ):
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*_lowercase , **_lowercase )
def __a ( self : Optional[Any] , _lowercase : "Image" , _lowercase : str ):
A = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
A = task_prompt.replace('{user_input}' , _lowercase )
A = self.pre_processor.tokenizer(
_lowercase , add_special_tokens=_lowercase , return_tensors='pt' ).input_ids
A = self.pre_processor(_lowercase , return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __a ( self : Dict , _lowercase : Optional[int] ):
return self.model.generate(
inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_lowercase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_lowercase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_lowercase , ).sequences
def __a ( self : int , _lowercase : Tuple ):
A = self.pre_processor.batch_decode(_lowercase )[0]
A = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
A = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
A = re.sub(R'<.*?>' , '' , _lowercase , count=1 ).strip() # remove first task start token
A = self.pre_processor.tokenajson(_lowercase )
return sequence["answer"]
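# Hypothetical usage sketch (upstream names: the class above is
# `DocumentQuestionAnsweringTool` and `tokenajson` corresponds to the
# processor's `token2json` helper; the image path is made up):
#
#     from PIL import Image
#     tool = DocumentQuestionAnsweringTool()
#     print(tool(document=Image.open('invoice.png'), question='What is the total?'))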
| 690 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
A = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase , cache_dir=_lowercase )
A = [t[-1] for t in os.walk(os.path.join(_lowercase , os.listdir(_lowercase )[0] , 'snapshots' ) )]
A = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 4
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(_lowercase , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
A = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_lowercase ) == num_samples
def __a ( self : Dict ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def __a ( self : List[str] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : str ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : Any ):
A = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , set_alpha_to_one=_lowercase , steps_offset=1 , )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_lowercase , safety_checker=_lowercase , )
A = scheduler.create_state()
A = scheduler_state
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def __a ( self : List[str] ):
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.device_count()
A = num_samples * [prompt]
A = jax.random.split(jax.random.PRNGKey(0 ) , _lowercase )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , use_memory_efficient_attention=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        A = images_eff[2, 0, 256, 10:17, 1]  # slice from the memory-efficient run
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 690 | 1 |
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def __snake_case ( UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
if not is_accelerate_available():
return method
A = version.parse(accelerate.__version__ ).base_version
if version.parse(UpperCamelCase__ ) < version.parse('0.17.0' ):
return method
def wrapper(self , *UpperCamelCase__ , **UpperCamelCase__ ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *UpperCamelCase__ , **UpperCamelCase__ )
return wrapper
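# Minimal usage sketch (hypothetical class; upstream calls this decorator
# `apply_forward_hook`). On accelerate < 0.17.0 the method is returned
# unchanged; otherwise the wrapper calls `_hf_hook.pre_forward` before the
# method whenever an offloading hook is attached to the instance:
#
#     class TinyVae:
#         @apply_forward_hook
#         def encode(self, x):
#             return x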
| 690 |
"""simple docstring"""
import os
import sys
UpperCamelCase : Optional[int] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
UpperCamelCase : Dict = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
"""simple docstring"""
return AutoConfig.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModel.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
"""simple docstring"""
return AutoModel.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
| 690 | 1 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCamelCase : int = get_logger(__name__)
class lowerCamelCase__ :
lowerCAmelCase = """dummy_data"""
lowerCAmelCase = """datasets"""
lowerCAmelCase = False
def __init__( self : List[str] , _lowercase : str , _lowercase : str , _lowercase : Union[Version, str] , _lowercase : Optional[str] = None , _lowercase : bool = False , _lowercase : bool = True , _lowercase : Optional[List[Callable]] = None , ):
A = 0
A = dataset_name
A = cache_dir
A = use_local_dummy_data
A = config
# download_callbacks take a single url as input
A = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
A = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
A = str(_lowercase )
# to be downloaded
A = None
A = None
@property
def __a ( self : Dict ):
if self._dummy_file is None:
A = self.download_dummy_data()
return self._dummy_file
@property
def __a ( self : Union[str, Any] ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def __a ( self : Optional[Any] ):
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def __a ( self : str ):
A = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
A = cached_path(
_lowercase , cache_dir=self.cache_dir , extract_compressed_file=_lowercase , force_extract=_lowercase )
return os.path.join(_lowercase , self.dummy_file_name )
@property
def __a ( self : str ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __a ( self : Optional[Any] ):
if self._bucket_url is None:
A = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def __a ( self : Tuple ):
        # return the full path if it's a directory
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # otherwise strip the file name and return the parent directory (e.g. for `xsum`)
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def __a ( self : Optional[Any] , _lowercase : Tuple , *_lowercase : Optional[Any] ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
A = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
A = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_lowercase , _lowercase ):
return self.create_dummy_data_dict(_lowercase , _lowercase )
elif isinstance(_lowercase , (list, tuple) ):
return self.create_dummy_data_list(_lowercase , _lowercase )
else:
return self.create_dummy_data_single(_lowercase , _lowercase )
def __a ( self : Tuple , _lowercase : str , *_lowercase : int ):
return self.download_and_extract(_lowercase )
def __a ( self : Union[str, Any] , _lowercase : Any , _lowercase : Tuple ):
return self.download_and_extract(_lowercase )
def __a ( self : Optional[int] , _lowercase : Any , *_lowercase : List[Any] , **_lowercase : List[Any] ):
return path
def __a ( self : List[Any] ):
return {}
def __a ( self : Tuple , _lowercase : Any , _lowercase : Dict ):
A = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_lowercase , _lowercase ):
for single_url in single_urls:
download_callback(_lowercase )
else:
A = single_urls
download_callback(_lowercase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_lowercase , _lowercase ):
A = [os.path.join(_lowercase , urllib.parse.quote_plus(Path(_lowercase ).name ) ) for x in single_urls]
else:
A = single_urls
A = os.path.join(_lowercase , urllib.parse.quote_plus(Path(_lowercase ).name ) )
A = value
# make sure that values are unique
if all(isinstance(_lowercase , _lowercase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
A = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __a ( self : List[str] , _lowercase : Dict , _lowercase : Tuple ):
A = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
A = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , _lowercase ) ) for url in data_url )
A = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
A = [data_url[0]] * len(_lowercase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_lowercase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
A = os.path.join(_lowercase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(_lowercase )
return dummy_data_list
def __a ( self : Union[str, Any] , _lowercase : List[Any] , _lowercase : str ):
for download_callback in self.download_callbacks:
download_callback(_lowercase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
A = os.path.join(_lowercase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(_lowercase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __a ( self : Tuple ):
pass
def __a ( self : Dict ):
pass
def __a ( self : Union[str, Any] , _lowercase : Optional[Any] ):
def _iter_archive_members(_lowercase : str ):
# this preserves the order of the members inside the ZIP archive
A = Path(self.dummy_file ).parent
A = path.relative_to(_lowercase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
A = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_lowercase )
A = Path(_lowercase )
A = _iter_archive_members(_lowercase ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(_lowercase ).as_posix(), file_path.open('rb' )
def __a ( self : int , _lowercase : Optional[int] ):
if not isinstance(_lowercase , _lowercase ):
A = [paths]
for path in paths:
if os.path.isfile(_lowercase ):
if os.path.basename(_lowercase ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_lowercase ):
if os.path.basename(_lowercase ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(_lowercase ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(_lowercase , _lowercase )
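# Usage sketch (hypothetical values; upstream calls this class
# `MockDownloadManager`). It resolves URLs to files inside a local or
# hub-hosted dummy_data.zip instead of downloading real data:
#
#     dl_manager = MockDownloadManager('squad', '/tmp/cache', '1.0.0')
#     files = dl_manager.download_and_extract({'train': 'https://example.com/train.json'})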
| 690 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : List[str] = logging.get_logger(__name__)
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : Tuple , _lowercase : bool = True , _lowercase : Optional[Dict[str, int]] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[str] , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
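    # The transforms below mirror the functional image API: `resize` scales the
    # shortest edge, `center_crop` cuts a fixed (height, width) window, and
    # `rescale` / `normalize` map pixel values into the model's expected range.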
def __a ( self : Any , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[Any] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : float , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Any , _lowercase : ImageInput , _lowercase : Optional[bool] = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[float] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_lowercase : Any , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def __a ( self : int , _lowercase : List[str] , _lowercase : List[Tuple] = None ):
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_lowercase ):
A = target_sizes.numpy()
A = []
for idx in range(len(_lowercase ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_lowercase )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
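# Usage sketch (hypothetical outputs; upstream name for the last method is
# `post_process_semantic_segmentation`): pass the model's raw outputs plus one
# (height, width) tuple per image to obtain per-pixel class-id maps:
#
#     seg_maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512)])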
| 690 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def __snake_case ( UpperCamelCase__ = "laptop" ) -> DataFrame:
"""simple docstring"""
A = f'https://www.amazon.in/laptop/s?k={product}'
A = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
A = BeautifulSoup(requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).text )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
A = item.ha.text
A = 'https://www.amazon.in/' + item.ha.a['href']
A = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
A = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
A = 'Not available'
try:
A = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
A = ''
try:
A = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
A = float('nan' )
except AttributeError:
pass
A = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = ' '
A = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCamelCase : Any = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 690 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
UpperCamelCase : Optional[int] = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
UpperCamelCase : List[str] = {
"RUCAIBox/mvp": 1_024,
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ["""input_ids""", """attention_mask"""]
lowerCAmelCase = MvpTokenizer
def __init__( self : Union[str, Any] , _lowercase : Optional[Any]=None , _lowercase : Optional[int]=None , _lowercase : Optional[Any]=None , _lowercase : List[str]="replace" , _lowercase : int="<s>" , _lowercase : Optional[Any]="</s>" , _lowercase : Tuple="</s>" , _lowercase : int="<s>" , _lowercase : Tuple="<unk>" , _lowercase : Tuple="<pad>" , _lowercase : List[str]="<mask>" , _lowercase : Union[str, Any]=False , _lowercase : int=True , **_lowercase : Optional[Any] , ):
super().__init__(
_lowercase , _lowercase , tokenizer_file=_lowercase , errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase , **_lowercase , )
A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _lowercase ) != add_prefix_space:
A = getattr(_lowercase , pre_tok_state.pop('type' ) )
A = add_prefix_space
A = pre_tok_class(**_lowercase )
A = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A = 'post_processor'
A = getattr(self.backend_tokenizer , _lowercase , _lowercase )
if tokenizer_component_instance:
A = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
A = tuple(state['sep'] )
if "cls" in state:
A = tuple(state['cls'] )
A = False
if state.get('add_prefix_space' , _lowercase ) != add_prefix_space:
A = add_prefix_space
A = True
if state.get('trim_offsets' , _lowercase ) != trim_offsets:
A = trim_offsets
A = True
if changes_to_apply:
A = getattr(_lowercase , state.pop('type' ) )
A = component_class(**_lowercase )
setattr(self.backend_tokenizer , _lowercase , _lowercase )
@property
def __a ( self : str ):
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __a ( self : Tuple , _lowercase : Dict ):
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else value
A = value
def __a ( self : List[Any] , *_lowercase : Tuple , **_lowercase : Any ):
A = kwargs.get('is_split_into_words' , _lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*_lowercase , **_lowercase )
def __a ( self : Union[str, Any] , *_lowercase : Optional[Any] , **_lowercase : int ):
A = kwargs.get('is_split_into_words' , _lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.' )
return super()._encode_plus(*_lowercase , **_lowercase )
def __a ( self : Any , _lowercase : str , _lowercase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
def __a ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Any=None ):
A = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
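# Resulting layout, read directly from the code above:
#   single sequence:   <s> A </s>
#   pair of sequences: <s> A </s> </s> B </s>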
def __a ( self : Optional[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 690 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self : List[str] , _lowercase : Optional[Any] , _lowercase : int=7 , _lowercase : List[str]=3 , _lowercase : Tuple=18 , _lowercase : Dict=30 , _lowercase : Any=400 , _lowercase : int=True , _lowercase : List[Any]=None , _lowercase : Tuple=True , _lowercase : List[Any]=False , _lowercase : str=True , _lowercase : List[str]=True , _lowercase : int=[0.5, 0.5, 0.5] , _lowercase : Optional[int]=[0.5, 0.5, 0.5] , ):
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size if size is not None else {'height': 18, 'width': 20}
A = do_thumbnail
A = do_align_axis
A = do_pad
A = do_normalize
A = image_mean
A = image_std
def __a ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
A = DonutImageProcessingTester(self )
@property
def __a ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Union[str, Any] ):
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , 'do_resize' ) )
self.assertTrue(hasattr(_lowercase , 'size' ) )
self.assertTrue(hasattr(_lowercase , 'do_thumbnail' ) )
self.assertTrue(hasattr(_lowercase , 'do_align_long_axis' ) )
self.assertTrue(hasattr(_lowercase , 'do_pad' ) )
self.assertTrue(hasattr(_lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowercase , 'image_mean' ) )
self.assertTrue(hasattr(_lowercase , 'image_std' ) )
def __a ( self : int ):
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
A = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def __a ( self : Any ):
pass
@is_flaky()
def __a ( self : int ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[str] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[Any] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 690 | 1 |
"""simple docstring"""
def __snake_case ( UpperCamelCase__ ) -> int:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), f'The input value of [n={number}] is not an integer'
if number == 1:
return 2
elif number < 1:
A = f'The input value of [n={number}] has to be > 0'
raise ValueError(UpperCamelCase__ )
else:
A = sylvester(number - 1 )
A = num - 1
A = num
return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 690 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase__ :
def __init__( self : Optional[Any] , _lowercase : int=2 , _lowercase : Optional[Any]=3 , _lowercase : Any=64 , _lowercase : Tuple=None ):
A = np.random.default_rng(_lowercase )
A = length
A = rng.normal(size=(length,) ).astype(np.floataa )
A = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : str ):
return self.length
def __getitem__( self : List[str] , _lowercase : int ):
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase__ ( torch.nn.Module ):
def __init__( self : Optional[int] , _lowercase : Any=0 , _lowercase : List[Any]=0 , _lowercase : Optional[int]=False ):
super().__init__()
A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A = True
def __a ( self : Optional[Any] , _lowercase : str=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
A = False
return x * self.a[0] + self.b[0]
class lowerCamelCase__ ( torch.nn.Module ):
def __init__( self : Optional[Any] , _lowercase : Any=0 , _lowercase : List[str]=0 , _lowercase : str=False ):
super().__init__()
A = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
A = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
A = True
def __a ( self : int , _lowercase : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
A = False
return x * self.a + self.b
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ = 16 ) -> Optional[Any]:
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
A = AutoTokenizer.from_pretrained('bert-base-cased' )
A = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
A = load_dataset('csv' , data_files=UpperCamelCase__ )
A = datasets['train'].unique('label' )
A = {v: i for i, v in enumerate(UpperCamelCase__ )}
def tokenize_function(UpperCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
A = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' )
if "label" in examples:
A = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(UpperCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(UpperCamelCase__ , padding='longest' , return_tensors='pt' )
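# Padding strategy in collate_fn above: on TPU every batch is padded to a fixed
# 128 tokens so XLA compiles a single shape; elsewhere 'longest' pads only to the
# longest sequence in each batch, which wastes less compute.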
# Instantiate dataloaders.
A = DataLoader(tokenized_datasets['train'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=2 )
A = DataLoader(tokenized_datasets['validation'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=1 )
return train_dataloader, eval_dataloader
| 690 | 1 |
"""simple docstring"""
def __snake_case ( UpperCamelCase__ ) -> bool:
"""simple docstring"""
if num < 0:
return False
A = num
A = 0
while num > 0:
A = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
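# Worked trace (hand-checked): num = 121 gives rev_num = 1, 12, 121 across the
# loop, so the function returns True; num = 123 ends with rev_num = 321 -> False.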
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 |
"""simple docstring"""
from __future__ import annotations
def __snake_case ( UpperCamelCase__ ) -> list[int]: # This function is recursive
"""simple docstring"""
A = len(UpperCamelCase__ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
A = array[0]
A = False
A = 1
A = []
while not is_found and i < array_length:
if array[i] < pivot:
A = True
A = [element for element in array[i:] if element >= array[i]]
A = longest_subsequence(UpperCamelCase__ )
if len(UpperCamelCase__ ) > len(UpperCamelCase__ ):
A = temp_array
else:
i += 1
A = [element for element in array[1:] if element >= pivot]
A = [pivot, *longest_subsequence(UpperCamelCase__ )]
if len(UpperCamelCase__ ) > len(UpperCamelCase__ ):
return temp_array
else:
return longest_subseq
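# Representative result (hand-worked, not a library call): for
# [10, 22, 9, 33, 21, 50, 41, 60, 80] the recursion returns a longest increasing
# subsequence of length 6, such as [10, 22, 33, 41, 60, 80].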
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 | 1 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
A = old_name
if "patch_embed" in old_name:
A , A , A = old_name.split('.' )
if layer == "0":
A = old_name.replace('0' , 'convolution1' )
elif layer == "1":
A = old_name.replace('1' , 'batchnorm_before' )
elif layer == "3":
A = old_name.replace('3' , 'convolution2' )
else:
A = old_name.replace('4' , 'batchnorm_after' )
if "network" in old_name and re.search(r'\d\.\d' , UpperCamelCase__ ):
A = r'\b\d{2}\b'
if bool(re.search(UpperCamelCase__ , UpperCamelCase__ ) ):
A = re.search(r'\d\.\d\d.' , UpperCamelCase__ ).group()
else:
A = re.search(r'\d\.\d.' , UpperCamelCase__ ).group()
if int(match[0] ) < 6:
A = old_name.replace(UpperCamelCase__ , '' )
A = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] )
A = 'intermediate_stages.' + trimmed_name
else:
A = old_name.replace(UpperCamelCase__ , '' )
if int(match[2] ) < num_meta4D_last_stage:
A = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] )
else:
A = str(int(match[2] ) - num_meta4D_last_stage )
A = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index )
if "norm1" in old_name:
A = trimmed_name.replace('norm1' , 'layernorm1' )
elif "norm2" in old_name:
A = trimmed_name.replace('norm2' , 'layernorm2' )
elif "fc1" in old_name:
A = trimmed_name.replace('fc1' , 'linear_in' )
elif "fc2" in old_name:
A = trimmed_name.replace('fc2' , 'linear_out' )
A = 'last_stage.' + trimmed_name
elif "network" in old_name and re.search(r'.\d.' , UpperCamelCase__ ):
A = old_name.replace('network' , 'intermediate_stages' )
if "fc" in new_name:
A = new_name.replace('fc' , 'convolution' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
A = new_name.replace('norm1' , 'batchnorm_before' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
A = new_name.replace('norm2' , 'batchnorm_after' )
if "proj" in new_name:
A = new_name.replace('proj' , 'projection' )
if "dist_head" in new_name:
A = new_name.replace('dist_head' , 'distillation_classifier' )
elif "head" in new_name:
A = new_name.replace('head' , 'classifier' )
elif "patch_embed" in new_name:
A = 'efficientformer.' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
A = new_name.replace('norm' , 'layernorm' )
A = 'efficientformer.' + new_name
else:
A = 'efficientformer.encoder.' + new_name
return new_name
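# Illustrative mapping, assumed from the branches above rather than verified
# against a real checkpoint:
#   'patch_embed.0.weight' -> 'efficientformer.patch_embed.convolution1.weight'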
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
for key in checkpoint.copy().keys():
A = checkpoint.pop(UpperCamelCase__ )
A = val
return checkpoint
def __snake_case ( ) -> List[Any]:
"""simple docstring"""
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return image
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
A = torch.load(UpperCamelCase__ , map_location='cpu' )['model']
A = EfficientFormerConfig.from_json_file(UpperCamelCase__ )
A = EfficientFormerForImageClassificationWithTeacher(UpperCamelCase__ )
A = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] )
A = config.depths[-1] - config.num_metaad_blocks + 1
A = convert_torch_checkpoint(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
A = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
# prepare image
A = prepare_img()
A = 256
A = 224
A = EfficientFormerImageProcessor(
size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
A = processor(images=UpperCamelCase__ , return_tensors='pt' ).pixel_values
# original processing pipeline
A = Compose(
[
Resize(UpperCamelCase__ , interpolation=pillow_resamplings['bicubic'] ),
CenterCrop(UpperCamelCase__ ),
ToTensor(),
Normalize(UpperCamelCase__ , UpperCamelCase__ ),
] )
A = image_transforms(UpperCamelCase__ ).unsqueeze(0 )
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ )
A = model(UpperCamelCase__ )
A = outputs.logits
A = (1, 1000)
if "l1" in model_name:
A = torch.Tensor(
[-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
assert torch.allclose(logits[0, :10] , UpperCamelCase__ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
A = torch.Tensor(
[-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
assert torch.allclose(logits[0, :10] , UpperCamelCase__ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
A = torch.Tensor(
[-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'Unknown model checkpoint: {checkpoint_path}. Supported versions of efficientformer are l1, l3 and l7' )
# Save Checkpoints
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
print(f'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
processor.save_pretrained(UpperCamelCase__ )
print(f'Processor successfully saved at {pytorch_dump_path}' )
if push_to_hub:
print('Pushing model to the hub...' )
model.push_to_hub(
repo_id=f'Bearnardd/{pytorch_dump_path}' , commit_message='Add model' , use_temp_dir=UpperCamelCase__ , )
processor.push_to_hub(
repo_id=f'Bearnardd/{pytorch_dump_path}' , commit_message='Add image processor' , use_temp_dir=UpperCamelCase__ , )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
UpperCamelCase : Tuple = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 690 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
UpperCamelCase : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
UpperCamelCase : Optional[int] = typing.Union[np.floataa, int, float] # noqa: UP007
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> VectorOut:
"""simple docstring"""
return np.sqrt(np.sum((np.asarray(UpperCamelCase__ ) - np.asarray(UpperCamelCase__ )) ** 2 ) )
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> VectorOut:
"""simple docstring"""
return sum((va - va) ** 2 for va, va in zip(UpperCamelCase__ , UpperCamelCase__ ) ) ** (1 / 2)
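# Sanity check with exact arithmetic: for [1, 2, 3] and [4, 5, 6] both variants
# compute sqrt(3**2 + 3**2 + 3**2) = sqrt(27), roughly 5.196.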
if __name__ == "__main__":
def __snake_case ( ) -> None:
"""simple docstring"""
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
| 690 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : List[Any] = logging.get_logger(__name__)
UpperCamelCase : Tuple = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """mra"""
def __init__( self : int , _lowercase : Any=50_265 , _lowercase : str=768 , _lowercase : Union[str, Any]=12 , _lowercase : Dict=12 , _lowercase : int=3_072 , _lowercase : Any="gelu" , _lowercase : str=0.1 , _lowercase : List[Any]=0.1 , _lowercase : Dict=512 , _lowercase : Any=1 , _lowercase : Tuple=0.0_2 , _lowercase : str=1e-5 , _lowercase : Any="absolute" , _lowercase : List[Any]=4 , _lowercase : Optional[int]="full" , _lowercase : Tuple=0 , _lowercase : Optional[Any]=0 , _lowercase : Dict=1 , _lowercase : Union[str, Any]=0 , _lowercase : Tuple=2 , **_lowercase : Dict , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
A = vocab_size
A = max_position_embeddings
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = type_vocab_size
A = layer_norm_eps
A = position_embedding_type
A = block_per_row
A = approx_mode
A = initial_prior_first_n_blocks
A = initial_prior_diagonal_n_blocks
| 690 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
UpperCamelCase : List[Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False , ) -> Any:
"""simple docstring"""
output_path.parent.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , enable_onnx_checker=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
else:
export(
UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
@torch.no_grad()
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> str:
"""simple docstring"""
A = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
A = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
A = 'cpu'
A = StableDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=UpperCamelCase__ ).to(UpperCamelCase__ )
A = Path(UpperCamelCase__ )
# TEXT ENCODER
A = pipeline.text_encoder.config.max_position_embeddings
A = pipeline.text_encoder.config.hidden_size
A = pipeline.tokenizer(
'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors='pt' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=UpperCamelCase__ , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'sequence'},
} , opset=UpperCamelCase__ , )
del pipeline.text_encoder
# UNET
A = pipeline.unet.config.in_channels
A = pipeline.unet.config.sample_size
A = output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=UpperCamelCase__ , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'timestep': {0: 'batch'},
'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
} , opset=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , )
A = str(unet_path.absolute().as_posix() )
A = os.path.dirname(UpperCamelCase__ )
A = onnx.load(UpperCamelCase__ )
# clean up existing tensor files
shutil.rmtree(UpperCamelCase__ )
os.mkdir(UpperCamelCase__ )
# collate external tensor files into one
onnx.save_model(
UpperCamelCase__ , UpperCamelCase__ , save_as_external_data=UpperCamelCase__ , all_tensors_to_one_file=UpperCamelCase__ , location='weights.pb' , convert_attribute=UpperCamelCase__ , )
del pipeline.unet
# VAE ENCODER
A = pipeline.vae
A = vae_encoder.config.in_channels
A = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
A = lambda UpperCamelCase__ , UpperCamelCase__ : vae_encoder.encode(UpperCamelCase__ , UpperCamelCase__ )[0].sample()
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=UpperCamelCase__ , )
# VAE DECODER
A = pipeline.vae
A = vae_decoder.config.latent_channels
A = vae_decoder.config.out_channels
# forward only through the decoder part
A = vae_encoder.decode
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=UpperCamelCase__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
A = pipeline.safety_checker
A = safety_checker.config.vision_config.num_channels
A = safety_checker.config.vision_config.image_size
A = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
} , opset=UpperCamelCase__ , )
del pipeline.safety_checker
A = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
A = pipeline.feature_extractor
else:
A = None
A = None
A = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(UpperCamelCase__ )
print('ONNX pipeline saved to' , UpperCamelCase__ )
del pipeline
del onnx_pipeline
A = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase__ , provider='CPUExecutionProvider' )
print('ONNX pipeline is loadable' )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
UpperCamelCase : str = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
| 690 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : List[Any] = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """data2vec-text"""
def __init__( self : Any , _lowercase : Tuple=30_522 , _lowercase : Optional[int]=768 , _lowercase : Optional[int]=12 , _lowercase : Optional[int]=12 , _lowercase : List[Any]=3_072 , _lowercase : List[Any]="gelu" , _lowercase : List[str]=0.1 , _lowercase : str=0.1 , _lowercase : int=512 , _lowercase : int=2 , _lowercase : Tuple=0.0_2 , _lowercase : int=1e-12 , _lowercase : str=1 , _lowercase : Dict=0 , _lowercase : Tuple=2 , _lowercase : Dict="absolute" , _lowercase : List[str]=True , _lowercase : Optional[int]=None , **_lowercase : Optional[int] , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = position_embedding_type
A = use_cache
A = classifier_dropout
class lowerCamelCase__ ( UpperCAmelCase_ ):
@property
def __a ( self : Union[str, Any] ):
if self.task == "multiple-choice":
A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 690 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
UpperCamelCase : List[str] = Lock()
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
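# Hand-worked illustration of the alternating phases on [4, 3, 2, 1]:
#   even phase: compare (0,1) and (2,3) -> [3, 4, 1, 2]
#   odd phase:  compare (1,2)           -> [3, 1, 4, 2]
#   even phase: compare (0,1) and (2,3) -> [1, 3, 2, 4]
#   odd phase:  compare (1,2)           -> [1, 2, 3, 4]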
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(UpperCamelCase__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
A = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
A = min(UpperCamelCase__ , UpperCamelCase__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(UpperCamelCase__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
A = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
A = max(UpperCamelCase__ , UpperCamelCase__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(UpperCamelCase__ )
def __snake_case ( UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
A = []
A = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
A = Pipe()
A = Pipe()
process_array_.append(
Process(
target=UpperCamelCase__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
A = temp_rs
A = temp_rr
for i in range(1 , len(UpperCamelCase__ ) - 1 ):
A = Pipe()
A = Pipe()
process_array_.append(
Process(
target=UpperCamelCase__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
A = temp_rs
A = temp_rr
process_array_.append(
Process(
target=UpperCamelCase__ , args=(
len(UpperCamelCase__ ) - 1,
arr[len(UpperCamelCase__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(UpperCamelCase__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(UpperCamelCase__ ) ):
A = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __snake_case ( ) -> Optional[Any]:
"""simple docstring"""
A = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*UpperCamelCase__ )
A = odd_even_transposition(UpperCamelCase__ )
print('Sorted List\n' )
print(*UpperCamelCase__ )
if __name__ == "__main__":
main()
| 690 | 1 |
"""simple docstring"""
from __future__ import annotations
UpperCamelCase : Any = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
A = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the reference grid
A = 1
A = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the action grid
A = init[0]
A = init[1]
A = 0
A = g + heuristic[x][y] # cost from starting cell to destination cell
A = [[f, g, x, y]]
A = False # flag that is set when search is complete
A = False # flag set if we can't find expand
while not found and not resign:
if len(UpperCamelCase__ ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
A = cell.pop()
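# sorting ascending and then reversing means pop() removes the entry with the
# smallest f = g + heuristic, i.e. the most promising cell to expand next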
A = next_cell[2]
A = next_cell[3]
A = next_cell[1]
if x == goal[0] and y == goal[1]:
A = True
else:
for i in range(len(UpperCamelCase__ ) ): # to try out different valid actions
A = x + DIRECTIONS[i][0]
A = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCamelCase__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
A = g + cost
A = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
A = 1
A = i
A = []
A = goal[0]
A = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
A = x - DIRECTIONS[action[x][y]][0]
A = y - DIRECTIONS[action[x][y]][1]
A = xa
A = ya
invpath.append([x, y] )
A = []
for i in range(len(UpperCamelCase__ ) ):
path.append(invpath[len(UpperCamelCase__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
UpperCamelCase : Any = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0s are free paths whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCamelCase : List[Any] = [0, 0]
# all coordinates are given in format [y,x]
UpperCamelCase : int = [len(grid) - 1, len(grid[0]) - 1]
UpperCamelCase : Tuple = 1
# the heuristic map which pushes the path closer to the goal
UpperCamelCase : Union[str, Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCamelCase : List[str] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCamelCase : Dict = 99
UpperCamelCase , UpperCamelCase : Optional[Any] = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 690 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
UpperCamelCase : int = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
UpperCamelCase : List[Any] = dataset.iloc[:, 1:2].values
UpperCamelCase : Any = dataset.iloc[:, 2].values
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = train_test_split(X, y, test_size=0.2, random_state=0)
UpperCamelCase : List[str] = PolynomialFeatures(degree=4)
UpperCamelCase : Optional[int] = poly_reg.fit_transform(X)
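# With a single feature x, PolynomialFeatures(degree=4) expands each row to
# [1, x, x**2, x**3, x**4], so the linear model below fits a quartic curve in x.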
UpperCamelCase : List[Any] = LinearRegression()
pol_reg.fit(X_poly, y)
def __snake_case ( ) -> Optional[int]:
"""simple docstring"""
plt.scatter(UpperCamelCase__ , UpperCamelCase__ , color='red' )
plt.plot(UpperCamelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCamelCase__ ) ) , color='blue' )
plt.title('Truth or Bluff (Linear Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 690 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase__ :
def __init__( self : Optional[Any] , _lowercase : int=2 , _lowercase : Optional[Any]=3 , _lowercase : Any=64 , _lowercase : Tuple=None ):
A = np.random.default_rng(_lowercase )
A = length
A = rng.normal(size=(length,) ).astype(np.floataa )
A = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : str ):
return self.length
def __getitem__( self : List[str] , _lowercase : int ):
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase__ ( torch.nn.Module ):
def __init__( self : Optional[int] , _lowercase : Any=0 , _lowercase : List[Any]=0 , _lowercase : Optional[int]=False ):
super().__init__()
A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A = True
def __a ( self : Optional[Any] , _lowercase : str=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
A = False
return x * self.a[0] + self.b[0]
class lowerCamelCase__ ( torch.nn.Module ):
def __init__( self : Optional[Any] , _lowercase : Any=0 , _lowercase : List[str]=0 , _lowercase : str=False ):
super().__init__()
A = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
A = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
A = True
def __a ( self : int , _lowercase : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
A = False
return x * self.a + self.b
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ = 16 ) -> Optional[Any]:
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
A = AutoTokenizer.from_pretrained('bert-base-cased' )
A = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
A = load_dataset('csv' , data_files=UpperCamelCase__ )
A = datasets['train'].unique('label' )
A = {v: i for i, v in enumerate(UpperCamelCase__ )}
def tokenize_function(UpperCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
A = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' )
if "label" in examples:
A = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(UpperCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(UpperCamelCase__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
A = DataLoader(tokenized_datasets['train'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=2 )
A = DataLoader(tokenized_datasets['validation'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=1 )
return train_dataloader, eval_dataloader
| 690 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : List[str] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = True , **_lowercase : Tuple , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A = image_std if image_std is not None else OPENAI_CLIP_STD
A = do_convert_rgb
def __a ( self : str , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
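# 'shortest_edge' resizing keeps the aspect ratio: e.g. a 480x640 image with
# shortest_edge=224 becomes roughly 224x299 (exact rounding depends on the
# helper) before any 224x224 center crop is applied.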
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Union[str, Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Optional[int] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , **_lowercase : int , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , param_name='size' , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' , default_to_square=_lowercase )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A = [convert_to_rgb(_lowercase ) for image in images]
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
| 690 | 1 |
"""simple docstring"""
import math
from datetime import datetime, timedelta
def __snake_case ( UpperCamelCase__ ) -> datetime:
"""simple docstring"""
A = year % 19
A = year % 4
A = year % 7
A = math.floor(year / 100 )
A = math.floor((13 + 8 * leap_day_inhibits) / 25 )
A = leap_day_inhibits / 4
A = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
A = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
A = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
A = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(UpperCamelCase__ , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(UpperCamelCase__ , 4 , 18 )
else:
return datetime(UpperCamelCase__ , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
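# Spot checks against the standard Gregorian computus (worth re-verifying):
# gauss_easter(2000) -> 2000-04-23 and gauss_easter(2023) -> 2023-04-09.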
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
UpperCamelCase : Optional[Any] = "will be" if year > datetime.now().year else "was"
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 690 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
A = torch.nn.Linear(10 , 10 )
A = torch.optim.SGD(model.parameters() , 0.1 )
A = Accelerator()
A = accelerator.prepare(_lowercase )
try:
pickle.loads(pickle.dumps(_lowercase ) )
except Exception as e:
self.fail(f'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 690 | 1 |
"""simple docstring"""
import os
# Precomputes a list of the first 100 triangular numbers
UpperCamelCase : Tuple = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def __snake_case ( ) -> Any:
"""simple docstring"""
A = os.path.dirname(os.path.realpath(UpperCamelCase__ ) )
A = os.path.join(UpperCamelCase__ , 'words.txt' )
A = ''
with open(UpperCamelCase__ ) as f:
A = f.readline()
A = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
A = [
word
for word in [sum(ord(UpperCamelCase__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
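# Example word value: "SKY" -> 19 + 11 + 25 = 55, the 10th triangular number
# (10 * 11 / 2), so "SKY" counts as a triangular word.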
return len(UpperCamelCase__ )
if __name__ == "__main__":
print(solution())
| 690 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """convbert"""
def __init__( self : Optional[int] , _lowercase : List[Any]=30_522 , _lowercase : List[str]=768 , _lowercase : Optional[Any]=12 , _lowercase : Any=12 , _lowercase : str=3_072 , _lowercase : List[str]="gelu" , _lowercase : Dict=0.1 , _lowercase : Dict=0.1 , _lowercase : Any=512 , _lowercase : List[str]=2 , _lowercase : Tuple=0.0_2 , _lowercase : List[Any]=1e-12 , _lowercase : List[str]=1 , _lowercase : Tuple=0 , _lowercase : Any=2 , _lowercase : Union[str, Any]=768 , _lowercase : str=2 , _lowercase : Any=9 , _lowercase : Union[str, Any]=1 , _lowercase : Dict=None , **_lowercase : Union[str, Any] , ):
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase , )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = embedding_size
A = head_ratio
A = conv_kernel_size
A = num_groups
A = classifier_dropout
class lowerCamelCase__ ( UpperCAmelCase_ ):
@property
def __a ( self : str ):
if self.task == "multiple-choice":
A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 690 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase : Union[str, Any] = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 690 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 690 | 1 |
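Of the utilities re-exported above, `find_executable_batch_size` deserves a usage note: it retries its wrapped function with a halved batch size whenever a CUDA out-of-memory error escapes. A sketch assuming `accelerate` is installed (the training body is a placeholder):

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # on OOM the decorator halves batch_size and invokes train() again
    print(f'trying batch_size={batch_size}')

train()  # called without arguments; the decorator injects batch_size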
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
UpperCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self : Optional[Any] , **_lowercase : Dict ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A = deprecated_arg[3:]
setattr(self , _lowercase , not kwargs.pop(_lowercase ) )
logger.warning(
                    f'{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}' )
A = kwargs.pop('torchscript' , self.torchscript )
A = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics )
A = kwargs.pop('fp16_opt_level' , self.fpaa_opt_level )
super().__init__(**_lowercase )
lowerCAmelCase = field(default=UpperCAmelCase_ , metadata={"""help""": """Trace the models using torchscript"""} )
lowerCAmelCase = field(default=UpperCAmelCase_ , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} )
lowerCAmelCase = field(
default="""O1""" , metadata={
"""help""": (
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. """
"""See details at https://nvidia.github.io/apex/amp.html"""
)
} , )
@cached_property
def __a ( self : List[Any] ):
requires_backends(self , ['torch'] )
logger.info('PyTorch: setting up devices' )
if not self.cuda:
A = torch.device('cpu' )
A = 0
elif is_torch_tpu_available():
A = xm.xla_device()
A = 0
else:
A = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
A = torch.cuda.device_count()
return device, n_gpu
@property
def __a ( self : List[Any] ):
return is_torch_tpu_available() and self.tpu
@property
def __a ( self : Dict ):
requires_backends(self , ['torch'] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def __a ( self : Union[str, Any] ):
requires_backends(self , ['torch'] )
return self._setup_devices[0]
@property
def __a ( self : Union[str, Any] ):
requires_backends(self , ['torch'] )
return self._setup_devices[1]
@property
def __a ( self : Tuple ):
return self.n_gpu > 0
| 690 |
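The `__init__` above remaps legacy `no_*` keyword arguments onto their positive counterparts before delegating to the parent dataclass. The remapping in isolation (class name and flag are illustrative):

class Args:
    deprecated_args = ['no_cuda']

    def __init__(self, **kwargs):
        for deprecated in self.deprecated_args:
            if deprecated in kwargs:
                positive = deprecated[3:]  # strip the 'no_' prefix
                setattr(self, positive, not kwargs.pop(deprecated))

args = Args(no_cuda=True)
print(args.cuda)  # False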
"""simple docstring"""
from __future__ import annotations
from typing import Any
def evaluate_postfix( postfix_notation : list ) -> int:
    """simple docstring"""
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b , a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 | 1 |
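A quick check of the evaluator above: operands are pushed, each operator pops two values, and division truncates toward zero (C-style), handled by the sign check in the else branch.

assert evaluate_postfix(['2', '1', '+', '3', '*']) == 9   # (2 + 1) * 3
assert evaluate_postfix(['4', '13', '5', '/', '+']) == 6  # 4 + 13 // 5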
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""audio_values""", """audio_mask"""]
def __init__( self : Dict , _lowercase : str=2_048 , _lowercase : Tuple=1 , _lowercase : int=[16, 16] , _lowercase : Dict=128 , _lowercase : Any=44_100 , _lowercase : List[str]=86 , _lowercase : Tuple=2_048 , _lowercase : Union[str, Any]=0.0 , **_lowercase : Optional[Any] , ):
super().__init__(
feature_size=_lowercase , sampling_rate=_lowercase , padding_value=_lowercase , **_lowercase , )
A = spectrogram_length
A = num_channels
A = patch_size
A = feature_size // self.patch_size[1]
A = n_fft
A = sampling_rate // hop_length_to_sampling_rate
A = sampling_rate
A = padding_value
A = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_lowercase , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=_lowercase , norm='slaney' , mel_scale='slaney' , ).T
def __a ( self : List[str] , _lowercase : np.array ):
A = spectrogram(
_lowercase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , )
A = log_spec[:, :-1]
A = log_spec - 2_0.0
A = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : List[str] , _lowercase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[bool] = True , _lowercase : Optional[int] = None , _lowercase : bool = False , _lowercase : bool = False , **_lowercase : int , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
A = isinstance(_lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
A = is_batched_numpy or (
isinstance(_lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_lowercase , np.ndarray ):
A = np.asarray(_lowercase , dtype=np.floataa )
elif isinstance(_lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
A = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , _lowercase ):
A = [np.asarray(_lowercase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
A = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
A = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
A = np.array(_lowercase ).astype(np.floataa )
# convert into correct format for padding
A = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
A = np.ones([len(_lowercase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
A = padded_audio_features * self.padding_value
for i in range(len(_lowercase ) ):
A = audio_features[i]
A = feature
# return as BatchFeature
if return_attention_mask:
A = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
A = {'audio_values': padded_audio_features}
A = BatchFeature(data=_lowercase , tensor_type=_lowercase )
return encoded_inputs
| 690 |
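The `_np_extract_fbank_features` method above converts power spectrograms to dB, shifts by -20, and squashes the result into [-1, 1]. The scaling step in isolation, on synthetic dB values:

import numpy as np

log_spec = np.array([-80.0, -60.0, -20.0, 0.0])  # illustrative dB values
log_spec = log_spec - 20.0
scaled = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
print(scaled)  # [-1.  -1.   0.   0.5]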
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCamelCase : Any = None
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : str = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCamelCase : Optional[int] = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCamelCase : str = "▁"
# Segments (not really needed)
UpperCamelCase : str = 0
UpperCamelCase : int = 1
UpperCamelCase : List[Any] = 2
UpperCamelCase : Union[str, Any] = 3
UpperCamelCase : Optional[Any] = 4
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = """left"""
lowerCAmelCase = XLNetTokenizer
def __init__( self : Tuple , _lowercase : List[Any]=None , _lowercase : Any=None , _lowercase : int=False , _lowercase : Tuple=True , _lowercase : Union[str, Any]=False , _lowercase : int="<s>" , _lowercase : Optional[int]="</s>" , _lowercase : Dict="<unk>" , _lowercase : Optional[int]="<sep>" , _lowercase : int="<pad>" , _lowercase : Dict="<cls>" , _lowercase : str="<mask>" , _lowercase : List[str]=["<eop>", "<eod>"] , **_lowercase : Any , ):
        # Mask token behaves like a normal word, i.e. includes the space before it
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
vocab_file=_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
A = 3
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = False if not self.vocab_file else True
def __a ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 690 | 1 |
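XLNet places its classifier token at the end of the sequence (hence `padding_side = "left"`), unlike BERT-style models that lead with `[CLS]`. The layouts produced by the special-token builder above, sketched with placeholder ids:

token_ids_a = [10, 11]
token_ids_b = [20]
sep, cls = [4], [3]  # illustrative ids for <sep> and <cls>

single = token_ids_a + sep + cls                    # [10, 11, 4, 3]
pair = token_ids_a + sep + token_ids_b + sep + cls  # [10, 11, 4, 20, 4, 3]
print(single, pair)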
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
UpperCamelCase : str = logging.getLogger(__name__)
class lowerCamelCase__ :
def __init__( self : Tuple ):
A = False
def __a ( self : List[Any] , _lowercase : int , _lowercase : Optional[Any] , _lowercase : Dict , _lowercase : List[Any] ):
if not self.initialized:
A = RagRetriever(
_lowercase , question_encoder_tokenizer=_lowercase , generator_tokenizer=_lowercase , index=_lowercase , init_retrieval=_lowercase , )
A = True
def __a ( self : Dict ):
self.retriever.index.init_index()
def __a ( self : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any] ):
A , A = self.retriever._main_retrieve(_lowercase , _lowercase )
return doc_ids, retrieved_doc_embeds
class lowerCamelCase__ ( UpperCAmelCase_ ):
def __init__( self : List[Any] , _lowercase : Dict , _lowercase : List[str] , _lowercase : Any , _lowercase : Optional[int] , _lowercase : Optional[int]=None ):
if index is not None and index.is_initialized() and len(_lowercase ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
_lowercase , question_encoder_tokenizer=_lowercase , generator_tokenizer=_lowercase , index=_lowercase , init_retrieval=_lowercase , )
A = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(_lowercase , _lowercase , _lowercase , _lowercase )
for worker in self.retrieval_workers
] )
def __a ( self : int ):
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __a ( self : Tuple , _lowercase : Any , _lowercase : Optional[Any] ):
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
A = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
A , A = ray.get(random_worker.retrieve.remote(_lowercase , _lowercase ) )
else:
A , A = self._main_retrieve(_lowercase , _lowercase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_lowercase )
@classmethod
def __a ( cls : int , _lowercase : List[Any] , _lowercase : List[str]=None , **_lowercase : Optional[Any] ):
return super(_lowercase , cls ).get_tokenizers(_lowercase , _lowercase , **_lowercase )
@classmethod
def __a ( cls : int , _lowercase : str , _lowercase : List[str] , _lowercase : List[Any]=None , **_lowercase : Dict ):
A = kwargs.pop('config' , _lowercase ) or RagConfig.from_pretrained(_lowercase , **_lowercase )
A = RagTokenizer.from_pretrained(_lowercase , config=_lowercase )
A = rag_tokenizer.question_encoder
A = rag_tokenizer.generator
if indexed_dataset is not None:
A = 'custom'
A = CustomHFIndex(config.retrieval_vector_size , _lowercase )
else:
A = cls._build_index(_lowercase )
return cls(
_lowercase , question_encoder_tokenizer=_lowercase , generator_tokenizer=_lowercase , retrieval_workers=_lowercase , index=_lowercase , )
| 690 |
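The retriever above fans retrieval out to a pool of Ray actors and picks one at random per query. A minimal version of that dispatch pattern, assuming `ray` is installed (the actor body is a placeholder):

import random
import ray

@ray.remote
class Worker:
    def retrieve(self, query):
        return f'result for {query}'

ray.init(ignore_reinit_error=True)
workers = [Worker.remote() for _ in range(4)]
worker = workers[random.randint(0, len(workers) - 1)]  # random actor, as above
print(ray.get(worker.retrieve.remote('question')))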
"""simple docstring"""
from __future__ import annotations
UpperCamelCase : Any = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic) -> tuple[list[list[int]], list[list[int]]]:
    """simple docstring"""
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError('Algorithm is unable to find solution' )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path , action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 690 | 1 |
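The heuristic built in the `__main__` block above is Manhattan distance plus a flat 99 penalty on obstacle cells. The same construction as a reusable function (grid follows the 0 = free / 1 = obstacle convention used above):

def build_heuristic(grid: list[list[int]], goal: list[int]) -> list[list[int]]:
    heuristic = [[0 for _ in range(len(grid[0]))] for _ in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                heuristic[i][j] = 99  # discourage expanding into obstacles
    return heuristic

print(build_heuristic([[0, 1], [0, 0]], goal=[1, 1]))  # [[2, 99], [1, 0]]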
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : List[str] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = True , **_lowercase : Tuple , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A = image_std if image_std is not None else OPENAI_CLIP_STD
A = do_convert_rgb
def __a ( self : str , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Union[str, Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Optional[int] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , **_lowercase : int , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , param_name='size' , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' , default_to_square=_lowercase )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A = [convert_to_rgb(_lowercase ) for image in images]
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
| 690 |
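The processor above chains resize -> center crop -> rescale -> normalize. The last two steps on a synthetic channels-first image (the mean/std values are the OPENAI_CLIP constants imported above):

import numpy as np

image = np.full((3, 2, 2), 255, dtype=np.uint8)
pixels = image.astype(np.float32) * (1 / 255)  # rescale to [0, 1]
mean = np.array([0.48145466, 0.4578275, 0.40821073]).reshape(3, 1, 1)
std = np.array([0.26862954, 0.26130258, 0.27577711]).reshape(3, 1, 1)
normalized = (pixels - mean) / std             # per-channel standardization
print(normalized[:, 0, 0])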
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : int = {"vocab_file": "sentencepiece.model"}
UpperCamelCase : Union[str, Any] = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
UpperCamelCase : Union[str, Any] = {
"google/rembert": 256,
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Dict=True , _lowercase : List[str]=True , _lowercase : int="[CLS]" , _lowercase : str="[SEP]" , _lowercase : List[str]="[UNK]" , _lowercase : List[Any]="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : List[str]="[CLS]" , _lowercase : Any="[MASK]" , **_lowercase : Optional[Any] , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def __a ( self : Tuple ):
return len(self.sp_model )
def __a ( self : List[str] ):
A = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : List[str] , _lowercase : int ):
A = d
A = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __a ( self : Dict , _lowercase : Union[str, Any] , _lowercase : Dict=False ):
A = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def __a ( self : Dict , _lowercase : Tuple ):
return self.sp_model.PieceToId(_lowercase )
def __a ( self : str , _lowercase : Optional[int] ):
return self.sp_model.IdToPiece(_lowercase )
def __a ( self : Optional[int] , _lowercase : Optional[int] ):
A = self.sp_model.decode_pieces(_lowercase )
return out_string
def __a ( self : Optional[int] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def __a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('Vocabulary path ({}) should be a directory'.format(_lowercase ) )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 690 | 1 |
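The `__getstate__`/`__setstate__` pair above exists because a `SentencePieceProcessor` handle cannot be pickled; it is dropped on serialization and rebuilt from `vocab_file` afterwards. The pattern in isolation (`SomeTokenizer` is an illustrative stand-in):

class SomeTokenizer:
    def __init__(self, vocab_file):
        self.vocab_file = vocab_file
        self.sp_model = object()  # stands in for spm.SentencePieceProcessor()

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None  # drop the unpicklable handle
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = object()  # real code reloads from self.vocab_file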
"""simple docstring"""
def triangle_number_generator():
    """simple docstring"""
    for n in range(1 , 1000000 ):
        yield n * (n + 1) // 2
def count_divisors( n : int ) -> int:
    """simple docstring"""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution() -> int:
    """simple docstring"""
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
| 690 |
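A sanity check against the classic statement of the problem: 28 is the first triangle number with more than five divisors.

assert count_divisors(28) == 6  # 1, 2, 4, 7, 14, 28
assert next(t for t in triangle_number_generator() if count_divisors(t) > 5) == 28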
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : List[Any] = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
UpperCamelCase : Any = {"mobilebert-uncased": 512}
UpperCamelCase : Any = {}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = MobileBertTokenizer
def __init__( self : Optional[int] , _lowercase : Optional[int]=None , _lowercase : Any=None , _lowercase : Optional[int]=True , _lowercase : int="[UNK]" , _lowercase : Dict="[SEP]" , _lowercase : Any="[PAD]" , _lowercase : str="[CLS]" , _lowercase : Union[str, Any]="[MASK]" , _lowercase : List[Any]=True , _lowercase : Any=None , **_lowercase : Optional[Any] , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
A = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowercase ) != tokenize_chinese_chars
):
A = getattr(_lowercase , normalizer_state.pop('type' ) )
A = do_lower_case
A = strip_accents
A = tokenize_chinese_chars
A = normalizer_class(**_lowercase )
A = do_lower_case
def __a ( self : List[Any] , _lowercase : Tuple , _lowercase : Any=None ):
A = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Dict , _lowercase : str , _lowercase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 690 | 1 |
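The `__init__` above rebuilds the backend normalizer whenever the requested casing options differ from the serialized ones. The same check in isolation, using the `tokenizers` library directly (the serialized state shown is illustrative):

from tokenizers import normalizers

serialized = {'type': 'BertNormalizer', 'lowercase': True, 'strip_accents': None, 'handle_chinese_chars': True}
do_lower_case = False  # what the caller requested
if serialized.get('lowercase') != do_lower_case:
    serialized['lowercase'] = do_lower_case
    normalizer_class = getattr(normalizers, serialized.pop('type'))
    normalizer = normalizer_class(**serialized)  # rebuilt with the requested casing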
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train , X_test , y_train , y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial() -> None:
    """simple docstring"""
    plt.scatter(X , y , color='red' )
    plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color='blue' )
    plt.title('Truth or Bluff (Linear Regression)' )
    plt.xlabel('Position level' )
    plt.ylabel('Salary' )
    plt.show()
if __name__ == "__main__":
    viz_polynomial()
    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 690 |
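What `PolynomialFeatures(degree=4)` does to the single Level column above: it expands each x into [1, x, x**2, x**3, x**4] so a plain `LinearRegression` can fit a quartic. On toy data:

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

X = np.array([[1.0], [2.0], [3.0]])
print(PolynomialFeatures(degree=4).fit_transform(X))
# each row becomes [1, x, x**2, x**3, x**4]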
"""simple docstring"""
def z_function( input_str : str ) -> list[int]:
    """simple docstring"""
    z_result = [0 for i in range(len(input_str ) )]
    # initialize interval's left pointer and right pointer
    left_pointer , right_pointer = 0, 0
    for i in range(1 , len(input_str ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge
        while go_next(i , z_result , input_str ):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer , right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next( i : int , z_result : list[int] , s : str ) -> bool:
    """simple docstring"""
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern( pattern : str , input_str : str ) -> int:
    """simple docstring"""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern ):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 | 1 |
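Quick checks for the functions above: z_result[i] is the length of the longest common prefix of the string and its suffix starting at i (index 0 is left at 0 by convention).

assert z_function('aaaa') == [0, 3, 2, 1]
assert find_pattern('a', 'aaa') == 3  # 'a' occurs at every position of 'aaa'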
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
UpperCamelCase : Any = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
UpperCamelCase : Union[str, Any] = {
"allenai/longformer-base-4096": 4_096,
"allenai/longformer-large-4096": 4_096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __snake_case ( ) -> Optional[Any]:
"""simple docstring"""
A = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
A = bs[:]
A = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCamelCase__ )
cs.append(2**8 + n )
n += 1
A = [chr(UpperCamelCase__ ) for n in cs]
return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) )
def __snake_case ( UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
A = set()
A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A = char
return pairs
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any]="replace" , _lowercase : str="<s>" , _lowercase : List[str]="</s>" , _lowercase : str="</s>" , _lowercase : Optional[Any]="<s>" , _lowercase : List[Any]="<unk>" , _lowercase : List[str]="<pad>" , _lowercase : Optional[int]="<mask>" , _lowercase : Optional[int]=False , **_lowercase : List[str] , ):
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
with open(_lowercase , encoding='utf-8' ) as vocab_handle:
A = json.load(_lowercase )
A = {v: k for k, v in self.encoder.items()}
A = errors # how to handle errors in decoding
A = bytes_to_unicode()
A = {v: k for k, v in self.byte_encoder.items()}
with open(_lowercase , encoding='utf-8' ) as merges_handle:
A = merges_handle.read().split('\n' )[1:-1]
A = [tuple(merge.split() ) for merge in bpe_merges]
A = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
A = {}
A = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def __a ( self : List[Any] ):
return len(self.encoder )
def __a ( self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : List[Any] , _lowercase : List[Any] ):
if token in self.cache:
return self.cache[token]
A = tuple(_lowercase )
A = get_pairs(_lowercase )
if not pairs:
return token
while True:
A = min(_lowercase , key=lambda _lowercase : self.bpe_ranks.get(_lowercase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
A , A = bigram
A = []
A = 0
while i < len(_lowercase ):
try:
A = word.index(_lowercase , _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A = tuple(_lowercase )
A = new_word
if len(_lowercase ) == 1:
break
else:
A = get_pairs(_lowercase )
A = ' '.join(_lowercase )
A = word
return word
def __a ( self : List[str] , _lowercase : str ):
A = []
for token in re.findall(self.pat , _lowercase ):
A = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowercase ).split(' ' ) )
return bpe_tokens
def __a ( self : List[Any] , _lowercase : Dict ):
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def __a ( self : List[Any] , _lowercase : List[Any] ):
return self.decoder.get(_lowercase )
def __a ( self : Optional[Any] , _lowercase : Union[str, Any] ):
A = ''.join(_lowercase )
A = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def __a ( self : str , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_lowercase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + '\n' )
A = 0
with open(_lowercase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowercase : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
A = token_index
writer.write(' '.join(_lowercase ) + '\n' )
index += 1
return vocab_file, merge_file
def __a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A = [self.cls_token_id]
A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __a ( self : Union[str, Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def __a ( self : Dict , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __a ( self : str , _lowercase : List[str] , _lowercase : Dict=False , **_lowercase : Tuple ):
A = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowercase ) > 0 and not text[0].isspace()):
A = ' ' + text
return (text, kwargs)
| 690 |
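The BPE loop above repeatedly merges the lowest-ranked adjacent symbol pair. The pair-extraction helper defined earlier in this file, restated under its conventional name so it can be run standalone:

def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

print(get_pairs(('h', 'e', 'l', 'l', 'o')))
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}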
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = LDMTextToImagePipeline
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase = False
def __a ( self : Dict ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
A = CLIPTextModel(_lowercase )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __a ( self : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any]=0 ):
if str(_lowercase ).startswith('mps' ):
A = torch.manual_seed(_lowercase )
else:
A = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Any ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = LDMTextToImagePipeline(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_dummy_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
A = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : int , _lowercase : List[Any] , _lowercase : int=torch.floataa , _lowercase : int=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Union[str, Any] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
A = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
A = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Tuple=torch.floataa , _lowercase : Optional[Any]=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : List[str] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images[0]
A = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
A = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 690 | 1 |
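The tests above pin every source of randomness so output slices stay reproducible across runs. The generator-seeding idiom on its own, mirroring `get_dummy_inputs` (the MPS branch follows the pipeline tests, which avoid device-bound generators on MPS):

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if device.startswith('mps'):
        return torch.manual_seed(seed)  # global default generator
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator('cpu', seed=0)
print(torch.randn(2, generator=gen))  # identical on every run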
"""simple docstring"""
from math import factorial
def solution( num : int = 100 ) -> int:
    """simple docstring"""
    return sum(int(x ) for x in str(factorial(num ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 690 |
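A smaller instance to sanity-check the function above: 10! = 3628800, whose digits sum to 27 (the default of 100 yields 648, the published answer).

assert solution(10) == 27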
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
A = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase , cache_dir=_lowercase )
A = [t[-1] for t in os.walk(os.path.join(_lowercase , os.listdir(_lowercase )[0] , 'snapshots' ) )]
A = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 4
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(_lowercase , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
A = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_lowercase ) == num_samples
def __a ( self : Dict ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def __a ( self : List[str] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : str ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : Any ):
A = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , set_alpha_to_one=_lowercase , steps_offset=1 , )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_lowercase , safety_checker=_lowercase , )
A = scheduler.create_state()
A = scheduler_state
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def __a ( self : List[str] ):
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.device_count()
A = num_samples * [prompt]
A = jax.random.split(jax.random.PRNGKey(0 ) , _lowercase )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , use_memory_efficient_attention=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        A = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
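# --- Illustrative sketch (not part of the test class above) ---
# The tests above all use the same JAX data-parallel recipe: replicate the
# weights across devices, shard one prompt per device, and give every device its
# own PRNG key. A minimal standalone version of that recipe, assuming jax, flax
# and diffusers are installed; the checkpoint and call signature match the tests.
def _sharded_flax_inference_sketch():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard
    from diffusers import FlaxStableDiffusionPipeline

    pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
    )
    prompts = jax.device_count() * ["a photo of an astronaut riding a horse"]
    prompt_ids = shard(pipeline.prepare_inputs(prompts))  # leading axis = device count
    params = replicate(params)  # copy the weights onto every device
    rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())  # one key per device
    # jit=True takes the pmapped path; images come back as (devices, per_device, H, W, 3)
    return pipeline(prompt_ids, params, rng, num_inference_steps=50, jit=True).images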
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase__ :
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : List[str]=3 , _lowercase : Optional[int]=32 , _lowercase : Union[str, Any]=3 , _lowercase : Optional[int]=10 , _lowercase : Optional[int]=[8, 16, 32, 64] , _lowercase : str=[1, 1, 2, 1] , _lowercase : Optional[int]=True , _lowercase : Optional[int]=True , _lowercase : Optional[Any]="relu" , _lowercase : Any=3 , _lowercase : str=None , _lowercase : Union[str, Any]=["stage2", "stage3", "stage4"] , _lowercase : Union[str, Any]=[2, 3, 4] , _lowercase : List[Any]=1 , ):
A = parent
A = batch_size
A = image_size
A = num_channels
A = embeddings_size
A = hidden_sizes
A = depths
A = is_training
A = use_labels
A = hidden_act
A = num_labels
A = scope
A = len(_lowercase )
A = out_features
A = out_indices
A = num_groups
def __a ( self : List[Any] ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.num_labels )
A = self.get_config()
return config, pixel_values, labels
def __a ( self : str ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : List[Any] , _lowercase : Optional[Any] ):
A = BitModel(config=_lowercase )
model.to(_lowercase )
model.eval()
A = model(_lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __a ( self : Union[str, Any] , _lowercase : List[str] , _lowercase : Dict , _lowercase : str ):
A = self.num_labels
A = BitForImageClassification(_lowercase )
model.to(_lowercase )
model.eval()
A = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[int] , _lowercase : int ):
A = BitBackbone(config=_lowercase )
model.to(_lowercase )
model.eval()
A = model(_lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A = None
A = BitBackbone(config=_lowercase )
model.to(_lowercase )
model.eval()
A = model(_lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __a ( self : str ):
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowerCAmelCase = (
{"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def __a ( self : Any ):
A = BitModelTester(self )
A = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
def __a ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self : Dict ):
return
@unittest.skip(reason='Bit does not output attentions' )
def __a ( self : List[str] ):
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def __a ( self : List[str] ):
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def __a ( self : Any ):
pass
def __a ( self : str ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowercase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowercase )
def __a ( self : int ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def __a ( self : Optional[int] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowercase )
def __a ( self : List[str] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(config=_lowercase )
for name, module in model.named_modules():
if isinstance(_lowercase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def __a ( self : Optional[Any] ):
def check_hidden_states_output(_lowercase : Tuple , _lowercase : Any , _lowercase : str ):
A = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowercase , _lowercase ) )
A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A = self.model_tester.num_stages
self.assertEqual(len(_lowercase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A = layer_type
A = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def __a ( self : Any ):
pass
def __a ( self : Dict ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def __a ( self : str ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = BitModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def __snake_case ( ) -> int:
"""simple docstring"""
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
@cached_property
def __a ( self : str ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __a ( self : Optional[Any] ):
A = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowercase )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=_lowercase , return_tensors='pt' ).to(_lowercase )
# forward pass
with torch.no_grad():
A = model(**_lowercase )
# verify the logits
A = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowercase )
A = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
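# --- Illustrative sketch (not part of the tests) ---
# The integration test above is the standard inference recipe: processor ->
# tensors -> model -> argmax over logits. A standalone version, assuming torch,
# transformers and PIL are installed and the 'google/bit-50' checkpoint (the
# first entry of the pretrained archive list) is reachable:
def _bit_classification_sketch(image_path):
    import torch
    from PIL import Image
    from transformers import BitForImageClassification, BitImageProcessor

    processor = BitImageProcessor.from_pretrained("google/bit-50")
    model = BitForImageClassification.from_pretrained("google/bit-50")
    inputs = processor(images=Image.open(image_path), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # map the winning index back to a human-readable ImageNet label
    return model.config.id2label[int(logits.argmax(-1))]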
@require_torch
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = (BitBackbone,) if is_torch_available() else ()
lowerCAmelCase = BitConfig
lowerCAmelCase = False
def __a ( self : Any ):
A = BitModelTester(self )
"""simple docstring"""
import os
import sys
UpperCamelCase : Optional[int] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
UpperCamelCase : Dict = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
"""simple docstring"""
return AutoConfig.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModel.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
"""simple docstring"""
return AutoModel.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
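# --- Pattern sketch (illustrative) ---
# Every entry point above is the same thin shape: forward *args/**kwargs to an
# Auto class and borrow its docstring via @add_start_docstrings. The generic
# factory below (a hypothetical helper, not part of the file) makes that
# explicit:
def _make_entry_point(auto_cls):
    @add_start_docstrings(auto_cls.__doc__)
    def entry_point(*args, **kwargs):
        return auto_cls.from_pretrained(*args, **kwargs)

    return entry_point

# e.g. a sequence-classification entry point built the same way:
# sequence_classifier = _make_entry_point(AutoModelForSequenceClassification)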
"""simple docstring"""
from collections.abc import Sequence
def __snake_case ( UpperCamelCase__ = None ) -> int:
"""simple docstring"""
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
A = nums[0]
for i in range(1 , len(UpperCamelCase__ ) ):
A = nums[i]
        A = max(ans , ans + num , num )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
UpperCamelCase : Any = int(input("Enter number of elements : ").strip())
UpperCamelCase : str = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
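# Worked example: for nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4] the best (not
# necessarily contiguous) subsequence keeps every positive element, so the
# function returns 1 + 4 + 2 + 1 + 4 == 12.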
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : List[str] = logging.get_logger(__name__)
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : Tuple , _lowercase : bool = True , _lowercase : Optional[Dict[str, int]] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[str] , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Any , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[Any] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : float , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Any , _lowercase : ImageInput , _lowercase : Optional[bool] = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[float] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_lowercase : Any , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def __a ( self : int , _lowercase : List[str] , _lowercase : List[Tuple] = None ):
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_lowercase ):
A = target_sizes.numpy()
A = []
for idx in range(len(_lowercase ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_lowercase )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
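# --- Pipeline-order sketch (illustrative) ---
# The preprocess method above applies, in order: to_numpy -> resize (shortest
# edge) -> center-crop -> rescale (1/255) -> normalize -> channels-first. The
# numeric part of that chain on a single HWC uint8 array, written out with
# numpy only (resize/crop omitted, since they need an interpolation backend):
def _preprocess_order_sketch(image):
    import numpy as np

    image = np.asarray(image, dtype=np.float32)  # HWC uint8 -> float
    image = image * (1 / 255)  # rescale
    mean = np.array(IMAGENET_STANDARD_MEAN)  # [0.5, 0.5, 0.5]
    std = np.array(IMAGENET_STANDARD_STD)  # [0.5, 0.5, 0.5]
    image = (image - mean) / std  # normalize per channel
    return image.transpose(2, 0, 1)  # HWC -> CHW (ChannelDimension.FIRST)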
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
UpperCamelCase : List[str] = parser.parse_args()
UpperCamelCase : List[Any] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
UpperCamelCase : Dict = CLIPImageProcessor()
UpperCamelCase : List[Any] = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
UpperCamelCase : Dict = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
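# --- Usage sketch (illustrative) ---
# Once converted, the dumped pipeline can be reloaded for image variation. A
# minimal sketch, assuming diffusers and PIL are installed and --dump_path
# pointed at ./karlo-image-variations (a hypothetical path):
#
#   from PIL import Image
#   from diffusers import UnCLIPImageVariationPipeline
#
#   pipe = UnCLIPImageVariationPipeline.from_pretrained("./karlo-image-variations")
#   variations = pipe(image=Image.open("input.png")).images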
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def __snake_case ( UpperCamelCase__ = "laptop" ) -> DataFrame:
"""simple docstring"""
A = f'https://www.amazon.in/laptop/s?k={product}'
A = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
A = BeautifulSoup(requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).text )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
A = item.ha.text
A = 'https://www.amazon.in/' + item.ha.a['href']
A = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
A = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
A = 'Not available'
try:
A = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
A = ''
try:
A = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
A = float('nan' )
except AttributeError:
pass
A = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = ' '
A = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCamelCase : Any = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
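# --- Helper sketch (illustrative) ---
# The discount computation above strips the currency symbol and thousands
# separators before converting to float; factored out, the same arithmetic is:
def _discount_percent(mrp: str, price: str) -> float:
    """E.g. _discount_percent('₹1,000', '₹750') -> 25.0 (assumes '₹x,xxx' formatting)."""
    mrp_value = float(mrp.strip('₹').replace(',', ''))
    price_value = float(price.strip('₹').replace(',', ''))
    return (mrp_value - price_value) / mrp_value * 100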
"""simple docstring"""
from __future__ import annotations
from typing import Any
def __snake_case ( UpperCamelCase__ ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
A = {'+', '-', '*', '/'}
A = []
for token in postfix_notation:
if token in operations:
            b , a = stack.pop(), stack.pop()  # first pop is the right-hand operand
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
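                # Python's // floors toward -inf; the branch below adjusts
                # mixed-sign division so it truncates toward zero (C-style),
                # e.g. -7 and 2 give -3 rather than -4.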
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(UpperCamelCase__ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
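# Worked example: the postfix expression ['2', '1', '+', '3', '*'] encodes
# (2 + 1) * 3; operands are pushed and each operator pops its two arguments,
# so the evaluator above returns 9.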
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self : List[str] , _lowercase : Optional[Any] , _lowercase : int=7 , _lowercase : List[str]=3 , _lowercase : Tuple=18 , _lowercase : Dict=30 , _lowercase : Any=400 , _lowercase : int=True , _lowercase : List[Any]=None , _lowercase : Tuple=True , _lowercase : List[Any]=False , _lowercase : str=True , _lowercase : List[str]=True , _lowercase : int=[0.5, 0.5, 0.5] , _lowercase : Optional[int]=[0.5, 0.5, 0.5] , ):
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size if size is not None else {'height': 18, 'width': 20}
A = do_thumbnail
A = do_align_axis
A = do_pad
A = do_normalize
A = image_mean
A = image_std
def __a ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
A = DonutImageProcessingTester(self )
@property
def __a ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Union[str, Any] ):
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , 'do_resize' ) )
self.assertTrue(hasattr(_lowercase , 'size' ) )
self.assertTrue(hasattr(_lowercase , 'do_thumbnail' ) )
self.assertTrue(hasattr(_lowercase , 'do_align_long_axis' ) )
self.assertTrue(hasattr(_lowercase , 'do_pad' ) )
self.assertTrue(hasattr(_lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowercase , 'image_mean' ) )
self.assertTrue(hasattr(_lowercase , 'image_std' ) )
def __a ( self : int ):
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
A = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def __a ( self : Any ):
pass
@is_flaky()
def __a ( self : int ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[str] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[Any] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
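# --- Usage sketch (illustrative) ---
# Outside the test harness the processor is driven the same way: construct it
# with the Donut-specific flags and call it on PIL images or arrays. Assumes
# transformers with vision extras is installed; the size dict mirrors the
# tester defaults above.
def _donut_preprocess_sketch(images):
    from transformers import DonutImageProcessor

    processor = DonutImageProcessor(
        size={"height": 18, "width": 20}, do_align_long_axis=False, do_pad=True
    )
    return processor(images, return_tensors="pt").pixel_values  # (batch, 3, 18, 20)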
"""simple docstring"""
from __future__ import annotations
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> tuple[int, int]:
"""simple docstring"""
if b == 0:
return (1, 0)
((A) , (A)) = extended_euclid(UpperCamelCase__ , a % b )
A = a // b
return (y, x - k * y)
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
((A) , (A)) = extended_euclid(UpperCamelCase__ , UpperCamelCase__ )
A = na * na
A = ra * x * na + ra * y * na
return (n % m + m) % m
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
((A) , (A)) = extended_euclid(UpperCamelCase__ , UpperCamelCase__ )
if b < 0:
A = (b % n + n) % n
return b
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
A , A = invert_modulo(UpperCamelCase__ , UpperCamelCase__ ), invert_modulo(UpperCamelCase__ , UpperCamelCase__ )
A = na * na
A = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase__ :
def __init__( self : Optional[Any] , _lowercase : int=2 , _lowercase : Optional[Any]=3 , _lowercase : Any=64 , _lowercase : Tuple=None ):
A = np.random.default_rng(_lowercase )
A = length
A = rng.normal(size=(length,) ).astype(np.floataa )
A = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : str ):
return self.length
def __getitem__( self : List[str] , _lowercase : int ):
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase__ ( torch.nn.Module ):
def __init__( self : Optional[int] , _lowercase : Any=0 , _lowercase : List[Any]=0 , _lowercase : Optional[int]=False ):
super().__init__()
A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A = True
def __a ( self : Optional[Any] , _lowercase : str=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
A = False
return x * self.a[0] + self.b[0]
class lowerCamelCase__ ( torch.nn.Module ):
def __init__( self : Optional[Any] , _lowercase : Any=0 , _lowercase : List[str]=0 , _lowercase : str=False ):
super().__init__()
A = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
A = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
A = True
def __a ( self : int , _lowercase : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
A = False
return x * self.a + self.b
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ = 16 ) -> Optional[Any]:
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
A = AutoTokenizer.from_pretrained('bert-base-cased' )
A = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
A = load_dataset('csv' , data_files=UpperCamelCase__ )
A = datasets['train'].unique('label' )
A = {v: i for i, v in enumerate(UpperCamelCase__ )}
def tokenize_function(UpperCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
A = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' )
if "label" in examples:
A = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(UpperCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(UpperCamelCase__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
A = DataLoader(tokenized_datasets['train'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=2 )
A = DataLoader(tokenized_datasets['validation'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=1 )
return train_dataloader, eval_dataloader
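# --- Training-loop sketch (illustrative) ---
# The dataset above synthesises y ≈ a*x + b plus Gaussian noise, and both model
# classes expose scalar parameters a and b, so a plain fitting loop looks like
# this (single-process; the real tests drive it through accelerate.Accelerator):
def _fit_regression_sketch(model, dataset, epochs=3, lr=0.1):
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    for _ in range(epochs):
        for batch in loader:
            optimizer.zero_grad()
            loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
            loss.backward()
            optimizer.step()
    return model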
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : Any = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[Any] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
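# --- Pattern note (illustrative) ---
# Registering the module as a _LazyModule keeps `import transformers` cheap:
# only the names in _import_structure are recorded up front, and the heavy
# torch-backed classes are materialised on first attribute access, e.g.
#
#   from transformers import FocalNetConfig  # resolves the submodule lazily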
"""simple docstring"""
from __future__ import annotations
def __snake_case ( UpperCamelCase__ ) -> list[int]: # This function is recursive
"""simple docstring"""
A = len(UpperCamelCase__ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
A = array[0]
A = False
A = 1
A = []
while not is_found and i < array_length:
if array[i] < pivot:
A = True
A = [element for element in array[i:] if element >= array[i]]
A = longest_subsequence(UpperCamelCase__ )
if len(UpperCamelCase__ ) > len(UpperCamelCase__ ):
A = temp_array
else:
i += 1
A = [element for element in array[1:] if element >= pivot]
A = [pivot, *longest_subsequence(UpperCamelCase__ )]
if len(UpperCamelCase__ ) > len(UpperCamelCase__ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
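# Worked example: for [10, 22, 9, 33, 21, 50, 41, 60, 80] the recursion either
# keeps the current pivot or restarts from a smaller later element, returning a
# longest non-decreasing subsequence of length 6, e.g. [10, 22, 33, 41, 60, 80].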
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowerCamelCase__ ( enum.Enum ):
lowerCAmelCase = 0
lowerCAmelCase = 1
lowerCAmelCase = 2
@add_end_docstrings(UpperCAmelCase_ )
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__( self : str , *_lowercase : Optional[int] , **_lowercase : str ):
super().__init__(*_lowercase , **_lowercase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
A = None
if self.model.config.prefix is not None:
A = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
A = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
A , A , A = self._sanitize_parameters(prefix=_lowercase , **self._forward_params )
A = {**self._preprocess_params, **preprocess_params}
A = {**self._forward_params, **forward_params}
def __a ( self : List[str] , _lowercase : List[str]=None , _lowercase : str=None , _lowercase : Optional[int]=None , _lowercase : Union[str, Any]=None , _lowercase : List[str]=None , _lowercase : Any=None , _lowercase : List[Any]=None , _lowercase : Union[str, Any]=None , **_lowercase : List[str] , ):
A = {}
if prefix is not None:
A = prefix
if prefix:
A = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
A = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'
' [None, \'hole\']' )
A = handle_long_generation
preprocess_params.update(_lowercase )
A = generate_kwargs
A = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
A = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
A = ReturnType.TENSORS
if return_type is not None:
A = return_type
if clean_up_tokenization_spaces is not None:
A = clean_up_tokenization_spaces
if stop_sequence is not None:
A = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
A = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __a ( self : int , *_lowercase : List[Any] , **_lowercase : Any ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*_lowercase , **_lowercase )
def __call__( self : List[str] , _lowercase : Optional[Any] , **_lowercase : List[str] ):
return super().__call__(_lowercase , **_lowercase )
def __a ( self : Any , _lowercase : str , _lowercase : List[Any]="" , _lowercase : Optional[int]=None , **_lowercase : Union[str, Any] ):
A = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
A = prompt_text
if handle_long_generation == "hole":
A = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
A = generate_kwargs['max_new_tokens']
else:
A = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
A = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
                        'We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'
                        ' model\'s max length' )
A = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
A = inputs['attention_mask'][:, -keep_length:]
return inputs
def __a ( self : List[str] , _lowercase : Optional[int] , **_lowercase : Optional[Any] ):
A = model_inputs['input_ids']
A = model_inputs.get('attention_mask' , _lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
A = None
A = None
A = 1
else:
A = input_ids.shape[0]
A = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
A = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
A = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
A = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
A = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
A = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
A = generated_sequence.shape[0]
if self.framework == "pt":
A = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
A = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __a ( self : str , _lowercase : str , _lowercase : Optional[int]=ReturnType.FULL_TEXT , _lowercase : int=True ):
A = model_outputs['generated_sequence'][0]
A = model_outputs['input_ids']
A = model_outputs['prompt_text']
A = generated_sequence.numpy().tolist()
A = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
A = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
A = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
A = 0
else:
A = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
A = prompt_text + text[prompt_length:]
else:
A = text[prompt_length:]
A = {'generated_text': all_text}
records.append(_lowercase )
return records
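# --- Usage sketch (illustrative) ---
# End to end, the class above is normally reached through the pipeline factory.
# A minimal sketch, assuming transformers is installed and the small gpt2
# checkpoint is reachable:
def _text_generation_sketch():
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    # return_full_text=False maps to ReturnType.NEW_TEXT in the postprocessing above
    outputs = generator("Hello, my dog is", max_new_tokens=20, return_full_text=False)
    return outputs[0]["generated_text"]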
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
UpperCamelCase : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
UpperCamelCase : Optional[int] = typing.Union[np.floataa, int, float] # noqa: UP007
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> VectorOut:
"""simple docstring"""
return np.sqrt(np.sum((np.asarray(UpperCamelCase__ ) - np.asarray(UpperCamelCase__ )) ** 2 ) )
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> VectorOut:
"""simple docstring"""
return sum((va - va) ** 2 for va, va in zip(UpperCamelCase__ , UpperCamelCase__ ) ) ** (1 / 2)
if __name__ == "__main__":
def __snake_case ( ) -> None:
"""simple docstring"""
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
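# Worked example: for the points (0, 0, 0) and (3, 4, 0) both implementations
# above return 5.0, i.e. sqrt(3**2 + 4**2).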
"""simple docstring"""
from __future__ import annotations
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> list[list[int]]:
"""simple docstring"""
A = []
A = []
A = 0
A = sum(UpperCamelCase__ )
create_state_space_tree(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return result
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> None:
"""simple docstring"""
if sum(UpperCamelCase__ ) > max_sum or (remaining_nums_sum + sum(UpperCamelCase__ )) < max_sum:
return
if sum(UpperCamelCase__ ) == max_sum:
result.append(UpperCamelCase__ )
return
for index in range(UpperCamelCase__ , len(UpperCamelCase__ ) ):
create_state_space_tree(
UpperCamelCase__ , UpperCamelCase__ , index + 1 , [*path, nums[index]] , UpperCamelCase__ , remaining_nums_sum - nums[index] , )
UpperCamelCase : Union[str, Any] = [3, 34, 4, 12, 5, 2]
UpperCamelCase : int = 9
UpperCamelCase : List[Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
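# Worked example: for nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 the search
# yields the two qualifying subsets [3, 4, 2] and [4, 5]; a branch is pruned as
# soon as the running path overshoots max_sum or the remaining elements can no
# longer reach it.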
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
UpperCamelCase : List[Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False , ) -> Any:
"""simple docstring"""
output_path.parent.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , enable_onnx_checker=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
else:
export(
UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
@torch.no_grad()
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> str:
"""simple docstring"""
A = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
A = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
A = 'cpu'
A = StableDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=UpperCamelCase__ ).to(UpperCamelCase__ )
A = Path(UpperCamelCase__ )
# TEXT ENCODER
A = pipeline.text_encoder.config.max_position_embeddings
A = pipeline.text_encoder.config.hidden_size
A = pipeline.tokenizer(
'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors='pt' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=UpperCamelCase__ , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'sequence'},
} , opset=UpperCamelCase__ , )
del pipeline.text_encoder
# UNET
A = pipeline.unet.config.in_channels
A = pipeline.unet.config.sample_size
A = output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=UpperCamelCase__ , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'timestep': {0: 'batch'},
'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
} , opset=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , )
A = str(unet_path.absolute().as_posix() )
A = os.path.dirname(UpperCamelCase__ )
A = onnx.load(UpperCamelCase__ )
# clean up existing tensor files
shutil.rmtree(UpperCamelCase__ )
os.mkdir(UpperCamelCase__ )
# collate external tensor files into one
onnx.save_model(
UpperCamelCase__ , UpperCamelCase__ , save_as_external_data=UpperCamelCase__ , all_tensors_to_one_file=UpperCamelCase__ , location='weights.pb' , convert_attribute=UpperCamelCase__ , )
del pipeline.unet
# VAE ENCODER
A = pipeline.vae
A = vae_encoder.config.in_channels
A = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
A = lambda UpperCamelCase__ , UpperCamelCase__ : vae_encoder.encode(UpperCamelCase__ , UpperCamelCase__ )[0].sample()
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=UpperCamelCase__ , )
# VAE DECODER
A = pipeline.vae
A = vae_decoder.config.latent_channels
A = vae_decoder.config.out_channels
# forward only through the decoder part
    A = vae_decoder.decode
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=UpperCamelCase__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
A = pipeline.safety_checker
A = safety_checker.config.vision_config.num_channels
A = safety_checker.config.vision_config.image_size
A = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
} , opset=UpperCamelCase__ , )
del pipeline.safety_checker
A = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
A = pipeline.feature_extractor
else:
A = None
A = None
A = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(UpperCamelCase__ )
print('ONNX pipeline saved to' , UpperCamelCase__ )
del pipeline
del onnx_pipeline
A = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase__ , provider='CPUExecutionProvider' )
print('ONNX pipeline is loadable' )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
UpperCamelCase : str = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
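# --- Usage sketch (illustrative) ---
# Once exported, the ONNX pipeline loads without any PyTorch weights. A minimal
# sketch, assuming the script above was run with --output_path ./sd-onnx
# (a hypothetical path):
#
#   from diffusers import OnnxStableDiffusionPipeline
#
#   pipe = OnnxStableDiffusionPipeline.from_pretrained("./sd-onnx", provider="CPUExecutionProvider")
#   image = pipe("a photo of an astronaut riding a horse").images[0]
#   image.save("astronaut.png")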
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
# General docstring
UpperCamelCase : str = "RegNetConfig"
# Base docstring
UpperCamelCase : List[Any] = "facebook/regnet-y-040"
UpperCamelCase : int = [1, 1_088, 7, 7]
# Image classification docstring
UpperCamelCase : List[Any] = "facebook/regnet-y-040"
UpperCamelCase : List[Any] = "tabby, tabby cat"
UpperCamelCase : List[Any] = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCamelCase__ ( tf.keras.layers.Layer ):
def __init__( self : int , _lowercase : int , _lowercase : int = 3 , _lowercase : int = 1 , _lowercase : int = 1 , _lowercase : Optional[str] = "relu" , **_lowercase : Optional[Any] , ):
super().__init__(**_lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
A = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
A = tf.keras.layers.ConvaD(
filters=_lowercase , kernel_size=_lowercase , strides=_lowercase , padding='VALID' , groups=_lowercase , use_bias=_lowercase , name='convolution' , )
A = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
A = ACTaFN[activation] if activation is not None else tf.identity
def __a ( self : List[str] , _lowercase : int ):
A = self.convolution(self.padding(_lowercase ) )
A = self.normalization(_lowercase )
A = self.activation(_lowercase )
return hidden_state
class lowerCamelCase__ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _lowercase : RegNetConfig , **_lowercase : Optional[Any] ):
super().__init__(**_lowercase )
A = config.num_channels
A = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def __a ( self : int , _lowercase : int ):
A = shape_list(_lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
A = tf.transpose(_lowercase , perm=(0, 2, 3, 1) )
A = self.embedder(_lowercase )
return hidden_state
class lowerCamelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Any , _lowercase : int , _lowercase : int = 2 , **_lowercase : Optional[int] ):
super().__init__(**_lowercase )
A = tf.keras.layers.ConvaD(
filters=_lowercase , kernel_size=1 , strides=_lowercase , use_bias=_lowercase , name='convolution' )
A = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
def __a ( self : Optional[int] , _lowercase : tf.Tensor , _lowercase : bool = False ):
return self.normalization(self.convolution(_lowercase ) , training=_lowercase )
class lowerCamelCase__ ( tf.keras.layers.Layer ):
def __init__( self : str , _lowercase : int , _lowercase : int , **_lowercase : Dict ):
super().__init__(**_lowercase )
A = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowercase , name='pooler' )
A = [
tf.keras.layers.ConvaD(filters=_lowercase , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=_lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def __a ( self : Dict , _lowercase : str ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
A = self.pooler(_lowercase )
for layer_module in self.attention:
A = layer_module(_lowercase )
A = hidden_state * pooled
return hidden_state
class lowerCamelCase__ ( tf.keras.layers.Layer ):
def __init__( self : List[str] , _lowercase : RegNetConfig , _lowercase : int , _lowercase : int , _lowercase : int = 1 , **_lowercase : Dict ):
super().__init__(**_lowercase )
A = in_channels != out_channels or stride != 1
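# the residual needs a projection whenever the channel count or spatial resolution changes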
A = max(1 , out_channels // config.groups_width )
A = (
TFRegNetShortCut(_lowercase , stride=_lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
A = [
TFRegNetConvLayer(_lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
_lowercase , stride=_lowercase , groups=_lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(_lowercase , kernel_size=1 , activation=_lowercase , name='layer.2' ),
]
A = ACTaFN[config.hidden_act]
def __a ( self : List[str] , _lowercase : Optional[int] ):
A = hidden_state
for layer_module in self.layers:
A = layer_module(_lowercase )
A = self.shortcut(_lowercase )
hidden_state += residual
A = self.activation(_lowercase )
return hidden_state
class lowerCamelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , _lowercase : RegNetConfig , _lowercase : int , _lowercase : int , _lowercase : int = 1 , **_lowercase : Optional[int] ):
super().__init__(**_lowercase )
A = in_channels != out_channels or stride != 1
A = max(1 , out_channels // config.groups_width )
A = (
TFRegNetShortCut(_lowercase , stride=_lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
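# same bottleneck as the X layer, plus a squeeze-and-excitation block after the grouped conv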
A = [
TFRegNetConvLayer(_lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
_lowercase , stride=_lowercase , groups=_lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(_lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(_lowercase , kernel_size=1 , activation=_lowercase , name='layer.3' ),
]
A = ACTaFN[config.hidden_act]
def __a ( self : List[Any] , _lowercase : Optional[int] ):
A = hidden_state
for layer_module in self.layers:
A = layer_module(_lowercase )
A = self.shortcut(_lowercase )
hidden_state += residual
A = self.activation(_lowercase )
return hidden_state
class lowerCamelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Any , _lowercase : RegNetConfig , _lowercase : int , _lowercase : int , _lowercase : int = 2 , _lowercase : int = 2 , **_lowercase : List[str] ):
super().__init__(**_lowercase )
A = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
A = [
# downsampling is done in the first layer with stride of 2
layer(_lowercase , _lowercase , _lowercase , stride=_lowercase , name='layers.0' ),
*[layer(_lowercase , _lowercase , _lowercase , name=f'layers.{i+1}' ) for i in range(depth - 1 )],
]
def __a ( self : Optional[int] , _lowercase : Optional[Any] ):
for layer_module in self.layers:
A = layer_module(_lowercase )
return hidden_state
class lowerCamelCase__ ( tf.keras.layers.Layer ):
def __init__( self : int , _lowercase : RegNetConfig , **_lowercase : Union[str, Any] ):
super().__init__(**_lowercase )
A = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
A = zip(config.hidden_sizes , config.hidden_sizes[1:] )
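# pair consecutive hidden sizes into (in_channels, out_channels) for the remaining stages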
for i, ((in_channels, out_channels), depth) in enumerate(zip(_lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_lowercase , _lowercase , _lowercase , depth=_lowercase , name=f'stages.{i+1}' ) )
def __a ( self : str , _lowercase : tf.Tensor , _lowercase : bool = False , _lowercase : bool = True ):
A = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A = hidden_states + (hidden_state,)
A = stage_module(_lowercase )
if output_hidden_states:
A = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_lowercase , hidden_states=_lowercase )
@keras_serializable
class lowerCamelCase__ ( tf.keras.layers.Layer ):
lowerCAmelCase = RegNetConfig
def __init__( self : str , _lowercase : str , **_lowercase : Dict ):
super().__init__(**_lowercase )
A = config
A = TFRegNetEmbeddings(_lowercase , name='embedder' )
A = TFRegNetEncoder(_lowercase , name='encoder' )
A = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowercase , name='pooler' )
@unpack_inputs
def __a ( self : Union[str, Any] , _lowercase : tf.Tensor , _lowercase : Optional[bool] = None , _lowercase : Optional[bool] = None , _lowercase : bool = False , ):
A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A = return_dict if return_dict is not None else self.config.use_return_dict
A = self.embedder(_lowercase , training=_lowercase )
A = self.encoder(
_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase , training=_lowercase )
A = encoder_outputs[0]
A = self.pooler(_lowercase )
# Change to NCHW output format to have uniformity in the modules
A = tf.transpose(_lowercase , perm=(0, 3, 1, 2) )
A = tf.transpose(_lowercase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
A = tuple([tf.transpose(_lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowercase , pooler_output=_lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = RegNetConfig
lowerCAmelCase = """regnet"""
lowerCAmelCase = """pixel_values"""
@property
def __a ( self : List[Any] ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
UpperCamelCase : Dict = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCamelCase : List[Any] = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , UpperCAmelCase_ , )
class lowerCamelCase__ ( UpperCAmelCase_ ):
def __init__( self : Tuple , _lowercase : RegNetConfig , *_lowercase : int , **_lowercase : Dict ):
super().__init__(_lowercase , *_lowercase , **_lowercase )
A = TFRegNetMainLayer(_lowercase , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(_lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __a ( self : int , _lowercase : tf.Tensor , _lowercase : Optional[bool] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[int]=False , ):
A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A = return_dict if return_dict is not None else self.config.use_return_dict
A = self.regnet(
pixel_values=_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase , training=_lowercase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , UpperCAmelCase_ , )
class lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
def __init__( self : Dict , _lowercase : RegNetConfig , *_lowercase : Dict , **_lowercase : Optional[int] ):
super().__init__(_lowercase , *_lowercase , **_lowercase )
A = config.num_labels
A = TFRegNetMainLayer(_lowercase , name='regnet' )
# classification head
A = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __a ( self : Optional[int] , _lowercase : tf.Tensor = None , _lowercase : tf.Tensor = None , _lowercase : bool = None , _lowercase : bool = None , _lowercase : Union[str, Any]=False , ):
A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A = return_dict if return_dict is not None else self.config.use_return_dict
A = self.regnet(
_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase , training=_lowercase )
A = outputs.pooler_output if return_dict else outputs[1]
A = self.classifier[0](_lowercase )
A = self.classifier[1](_lowercase )
A = None if labels is None else self.hf_compute_loss(labels=_lowercase , logits=_lowercase )
if not return_dict:
A = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_lowercase , logits=_lowercase , hidden_states=outputs.hidden_states )
| 690 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
UpperCamelCase : List[str] = Lock()
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
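# the bound of 10 matches the hardcoded 10-element list sorted in main() below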
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(UpperCamelCase__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
A = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
A = min(UpperCamelCase__ , UpperCamelCase__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(UpperCamelCase__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
A = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
A = max(UpperCamelCase__ , UpperCamelCase__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(UpperCamelCase__ )
def __snake_case ( UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
A = []
A = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
A = Pipe()
A = Pipe()
process_array_.append(
Process(
target=UpperCamelCase__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
A = temp_rs
A = temp_rr
for i in range(1 , len(UpperCamelCase__ ) - 1 ):
A = Pipe()
A = Pipe()
process_array_.append(
Process(
target=UpperCamelCase__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
A = temp_rs
A = temp_rr
process_array_.append(
Process(
target=UpperCamelCase__ , args=(
len(UpperCamelCase__ ) - 1,
arr[len(UpperCamelCase__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(UpperCamelCase__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(UpperCamelCase__ ) ):
A = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __snake_case ( ) -> Optional[Any]:
"""simple docstring"""
A = list(range(10 , 0 , -1 ) )
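# a reversed list is the worst case for this sort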
print('Initial List' )
print(*UpperCamelCase__ )
A = odd_even_transposition(UpperCamelCase__ )
print('Sorted List\n' )
print(*UpperCamelCase__ )
if __name__ == "__main__":
__snake_case()
| 690 | 1 |
"""simple docstring"""
class lowerCamelCase__ :
def __init__( self : Optional[Any] , _lowercase : Optional[Any] ):
# we need a list not a string, so do something to change the type
A = arr.split(',' )
def __a ( self : int ):
A = [int(self.array[0] )] * len(self.array )
A = [int(self.array[0] )] * len(self.array )
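# sum_value[i]: best sum of a subarray that ends at index i (Kadane's recurrence)
# rear[i]: best subarray sum seen anywhere in arr[0..i]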
for i in range(1 , len(self.array ) ):
A = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
A = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
UpperCamelCase : str = input("please input some numbers:")
UpperCamelCase : List[str] = SubArray(whole_array)
UpperCamelCase : Tuple = array.solve_sub_array()
print(("the results is:", re))
| 690 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
UpperCamelCase : int = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
UpperCamelCase : List[Any] = dataset.iloc[:, 1:2].values
UpperCamelCase : Any = dataset.iloc[:, 2].values
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = train_test_split(X, y, test_size=0.2, random_state=0)
UpperCamelCase : List[str] = PolynomialFeatures(degree=4)
UpperCamelCase : Optional[int] = poly_reg.fit_transform(X)
UpperCamelCase : List[Any] = LinearRegression()
pol_reg.fit(X_poly, y)
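# the fitted model is a degree-4 polynomial: salary ~ w0 + w1*x + w2*x^2 + w3*x^3 + w4*x^4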
def __snake_case ( ) -> Optional[int]:
"""simple docstring"""
plt.scatter(UpperCamelCase__ , UpperCamelCase__ , color='red' )
plt.plot(UpperCamelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCamelCase__ ) ) , color='blue' )
plt.title('Truth or Bluff (Linear Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
__snake_case()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 690 | 1 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {"vocab_file": "spiece.model"}
UpperCamelCase : str = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
UpperCamelCase : Dict = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : str , _lowercase : str , _lowercase : Union[str, Any]=False , _lowercase : List[Any]=False , _lowercase : str=False , _lowercase : Any=None , _lowercase : str=None , _lowercase : List[str]=None , _lowercase : Optional[Any]=None , _lowercase : Optional[Dict[str, Any]] = None , **_lowercase : Optional[int] , ):
A = {} if sp_model_kwargs is None else sp_model_kwargs
A = kwargs.get('name_or_path' )
if name_or_path is None:
logger.warning(
'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b;'
' if you are testing the model, this can safely be ignored' )
A = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
A = '<|endoftext|>' if eos_token is None else eos_token
A = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
A = unk_token if pad_token is None else pad_token
A = eos_token if bos_token is None else bos_token
else:
A = '<pad>' if pad_token is None else pad_token
A = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowercase )
# Used for whitespace normalization in input texts
# fmt: off
A = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
A = re.compile(
f'[{"".join(map(_lowercase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]' )
def __getstate__( self : List[Any] ):
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : str , _lowercase : List[Any] ):
A = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __a ( self : Any ):
return len(self.sp_model )
def __a ( self : str , _lowercase : str ):
A = self.non_printing_characters_re.sub('' , _lowercase )
# Normalize whitespaces
A = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
# NFC Unicode normalization
A = unicodedata.normalize('NFC' , _lowercase )
return text
def __a ( self : List[Any] , _lowercase : str , **_lowercase : List[Any] ):
A = self.preprocess_text(_lowercase )
return self.sp_model.encode(_lowercase , out_type=_lowercase )
def __a ( self : List[str] , _lowercase : str ):
return self.sp_model.PieceToId(_lowercase )
def __a ( self : Dict , _lowercase : int ):
return self.sp_model.IdToPiece(_lowercase )
@staticmethod
def __a ( _lowercase : str ):
return out_string
def __a ( self : int , _lowercase : List[str] ):
A = []
A = ''
A = False
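# buffer ordinary pieces and flush them through sentencepiece whenever a special token is hit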
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowercase ) + token
A = True
A = []
else:
current_sub_tokens.append(_lowercase )
A = False
out_string += self.sp_model.decode(_lowercase )
return out_string
def __a ( self : Tuple ):
A = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __a ( self : Optional[int] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowercase , 'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (out_vocab_file,)
def __a ( self : Any , _lowercase : Union[str, List[str]] , _lowercase : Union[str, bool] = False ):
if isinstance(_lowercase , _lowercase ):
A = self.preprocess_text(_lowercase )
A = self.sp_model.encode(_lowercase )
else:
A = [self.preprocess_text(_lowercase ) for t in text]
A = self.sp_model.encode(_lowercase )
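# accept both return_tensors=True and the HF-style "pt" flag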
if return_tensors is True or return_tensors == "pt":
A = torch.tensor(_lowercase )
return token_ids
def __a ( self : Union[str, Any] , _lowercase : Union[int, List[int]] ):
return self.sp_model.decode(_lowercase )
def __a ( self : int , _lowercase : "Conversation" ):
A = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
A = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(_lowercase ) + f'{self.bos_token}Bot:'
)
return self.encode(text=_lowercase )
| 690 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : List[str] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = True , **_lowercase : Tuple , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A = image_std if image_std is not None else OPENAI_CLIP_STD
A = do_convert_rgb
def __a ( self : str , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Union[str, Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Optional[int] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , **_lowercase : int , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , param_name='size' , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' , default_to_square=_lowercase )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A = [convert_to_rgb(_lowercase ) for image in images]
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
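# finally move the images to the requested channel layout (channels-first by default)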
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
| 690 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCamelCase : Dict = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """perceiver"""
def __init__( self : int , _lowercase : Tuple=256 , _lowercase : Optional[int]=1_280 , _lowercase : str=768 , _lowercase : Dict=1 , _lowercase : Optional[Any]=26 , _lowercase : Dict=8 , _lowercase : int=8 , _lowercase : List[Any]=None , _lowercase : Optional[int]=None , _lowercase : int="kv" , _lowercase : Dict=1 , _lowercase : Optional[Any]=1 , _lowercase : Dict="gelu" , _lowercase : int=0.1 , _lowercase : Union[str, Any]=0.0_2 , _lowercase : Union[str, Any]=1e-12 , _lowercase : Dict=True , _lowercase : List[Any]=262 , _lowercase : Optional[Any]=2_048 , _lowercase : int=56 , _lowercase : str=[368, 496] , _lowercase : Any=16 , _lowercase : Dict=1_920 , _lowercase : Optional[Any]=16 , _lowercase : str=[1, 16, 224, 224] , **_lowercase : Any , ):
super().__init__(**_lowercase )
A = num_latents
A = d_latents
A = d_model
A = num_blocks
A = num_self_attends_per_block
A = num_self_attention_heads
A = num_cross_attention_heads
A = qk_channels
A = v_channels
A = cross_attention_shape_for_attention
A = self_attention_widening_factor
A = cross_attention_widening_factor
A = hidden_act
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = use_query_residual
# masked language modeling attributes
A = vocab_size
A = max_position_embeddings
# image classification attributes
A = image_size
# flow attributes
A = train_size
# multimodal autoencoding attributes
A = num_frames
A = audio_samples_per_frame
A = samples_per_patch
A = output_shape
class lowerCamelCase__ ( UpperCAmelCase_ ):
@property
def __a ( self : int ):
if self.task == "multiple-choice":
A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def __a ( self : List[Any] ):
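# absolute tolerance used when validating the ONNX export against the original model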
return 1e-4
def __a ( self : int , _lowercase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , _lowercase : int = 3 , _lowercase : int = 40 , _lowercase : int = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(_lowercase , _lowercase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = preprocessor.num_special_tokens_to_add(_lowercase )
A = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowercase )
# Generate dummy inputs according to compute batch and sequence
A = [' '.join(['a'] ) * seq_length] * batch_size
A = dict(preprocessor(_lowercase , return_tensors=_lowercase ) )
A = inputs.pop('input_ids' )
return inputs
elif isinstance(_lowercase , _lowercase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(_lowercase , fixed_dimension=OnnxConfig.default_fixed_batch )
A = self._generate_dummy_images(_lowercase , _lowercase , _lowercase , _lowercase )
A = dict(preprocessor(images=_lowercase , return_tensors=_lowercase ) )
A = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
| 690 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
A = torch.nn.Linear(10 , 10 )
A = torch.optim.SGD(model.parameters() , 0.1 )
A = Accelerator()
A = accelerator.prepare(_lowercase )
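# prepare() wraps the optimizer; the test only checks that the wrapper survives a pickle round-trip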
try:
pickle.loads(pickle.dumps(_lowercase ) )
except Exception as e:
self.fail(f'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 690 | 1 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = XLMRobertaTokenizer
lowerCAmelCase = XLMRobertaTokenizerFast
lowerCAmelCase = True
lowerCAmelCase = True
def __a ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
A = XLMRobertaTokenizer(_lowercase , keep_accents=_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self : Optional[Any] ):
A = '<pad>'
A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def __a ( self : Dict ):
A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_lowercase ) , 1_002 )
def __a ( self : Optional[int] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_002 )
def __a ( self : List[Any] ):
A = XLMRobertaTokenizer(_lowercase , keep_accents=_lowercase )
A = tokenizer.tokenize('This is a test' )
self.assertListEqual(_lowercase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
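# fairseq_offset (1) shifts raw sentencepiece ids to leave room for fairseq's reserved specials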
A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(
_lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
A = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def __a ( self : str ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
A = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
A = tempfile.mkdtemp()
A = tokenizer_r.save_pretrained(_lowercase )
A = tokenizer_p.save_pretrained(_lowercase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
A = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
A = tokenizer_r.from_pretrained(_lowercase )
A = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=True
A = tempfile.mkdtemp()
A = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
A = tokenizer_p.save_pretrained(_lowercase )
# Checks it save with the same files
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
A = tokenizer_r.from_pretrained(_lowercase )
A = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=False
A = tempfile.mkdtemp()
A = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
A = tokenizer_p.save_pretrained(_lowercase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A = tokenizer_r.from_pretrained(_lowercase )
A = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
@cached_property
def __a ( self : str ):
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def __a ( self : List[Any] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_lowercase , f.name )
A = XLMRobertaTokenizer(f.name , keep_accents=_lowercase )
A = pickle.dumps(_lowercase )
pickle.loads(_lowercase )
def __a ( self : Optional[int] ):
if not self.test_rust_tokenizer:
return
A = self.get_tokenizer()
A = self.get_rust_tokenizer()
A = 'I was born in 92000, and this is falsé.'
A = tokenizer.tokenize(_lowercase )
A = rust_tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
A = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
A = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
A = self.get_rust_tokenizer()
A = tokenizer.encode(_lowercase )
A = rust_tokenizer.encode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
@slow
def __a ( self : int ):
A = 'Hello World!'
A = [0, 35_378, 6_661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )
@slow
def __a ( self : List[str] ):
A = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
A = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )
@slow
def __a ( self : Tuple ):
# fmt: off
A = {'input_ids': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
| 690 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """convbert"""
def __init__( self : Optional[int] , _lowercase : List[Any]=30_522 , _lowercase : List[str]=768 , _lowercase : Optional[Any]=12 , _lowercase : Any=12 , _lowercase : str=3_072 , _lowercase : List[str]="gelu" , _lowercase : Dict=0.1 , _lowercase : Dict=0.1 , _lowercase : Any=512 , _lowercase : List[str]=2 , _lowercase : Tuple=0.0_2 , _lowercase : List[Any]=1e-12 , _lowercase : List[str]=1 , _lowercase : Tuple=0 , _lowercase : Any=2 , _lowercase : Union[str, Any]=768 , _lowercase : str=2 , _lowercase : Any=9 , _lowercase : Union[str, Any]=1 , _lowercase : Dict=None , **_lowercase : Union[str, Any] , ):
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase , )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = embedding_size
A = head_ratio
A = conv_kernel_size
A = num_groups
A = classifier_dropout
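# head_ratio and conv_kernel_size configure ConvBERT's span-based dynamic convolution heads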
class lowerCamelCase__ ( UpperCAmelCase_ ):
@property
def __a ( self : str ):
if self.task == "multiple-choice":
A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 690 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase : List[Any] = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = ["GLPNFeatureExtractor"]
UpperCamelCase : Optional[int] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[Any] = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 690 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 690 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self : Any , _lowercase : Tuple , _lowercase : int=3 , _lowercase : int=32 , _lowercase : Any=3 , _lowercase : str=10 , _lowercase : int=[10, 20, 30, 40] , _lowercase : List[str]=[1, 1, 2, 1] , _lowercase : Tuple=True , _lowercase : List[str]=True , _lowercase : int="relu" , _lowercase : List[str]=3 , _lowercase : str=None , ):
A = parent
A = batch_size
A = image_size
A = num_channels
A = embeddings_size
A = hidden_sizes
A = depths
A = is_training
A = use_labels
A = hidden_act
A = num_labels
A = scope
A = len(_lowercase )
def __a ( self : Tuple ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = self.get_config()
return config, pixel_values
def __a ( self : List[Any] ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __a ( self : int , _lowercase : Any , _lowercase : int ):
A = FlaxRegNetModel(config=_lowercase )
A = model(_lowercase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __a ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Dict ):
A = self.num_labels
A = FlaxRegNetForImageClassification(config=_lowercase )
A = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : Tuple ):
A = self.prepare_config_and_inputs()
A , A = config_and_inputs
A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def __a ( self : List[Any] ):
A = FlaxRegNetModelTester(self )
A = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
def __a ( self : List[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self : str ):
return
def __a ( self : List[Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def __a ( self : Any ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def __a ( self : str ):
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def __a ( self : Any ):
pass
def __a ( self : List[Any] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowercase )
A = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowercase )
def __a ( self : List[Any] ):
def check_hidden_states_output(_lowercase : str , _lowercase : Dict , _lowercase : Optional[Any] ):
A = model_class(_lowercase )
A = model(**self._prepare_for_class(_lowercase , _lowercase ) )
A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A = self.model_tester.num_stages
self.assertEqual(len(_lowercase ) , expected_num_stages + 1 )
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def __a ( self : int ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A = self._prepare_for_class(_lowercase , _lowercase )
A = model_class(_lowercase )
@jax.jit
def model_jitted(_lowercase : Optional[Any] , **_lowercase : Any ):
return model(pixel_values=_lowercase , **_lowercase )
with self.subTest('JIT Enabled' ):
A = model_jitted(**_lowercase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
A = model_jitted(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for jitted_output, output in zip(_lowercase , _lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def __snake_case ( ) -> Any:
"""simple docstring"""
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
@cached_property
def __a ( self : int ):
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def __a ( self : Tuple ):
A = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=_lowercase , return_tensors='np' )
A = model(**_lowercase )
# verify the logits
A = (1, 1_000)
self.assertEqual(outputs.logits.shape , _lowercase )
A = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
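        # Editorial note: a sketch for running this module's tests locally
        # (upstream test path assumed; the @slow test needs the RUN_SLOW flag):
        #   RUN_SLOW=1 python -m pytest tests/models/regnet/test_modeling_flax_regnet.py -v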
| 690 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def __snake_case ( UpperCamelCase__ ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
A = {'+', '-', '*', '/'}
A = []
for token in postfix_notation:
if token in operations:
A , A = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
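                    # integer division truncating toward zero: Python's `//`
                    # floors, so bump the quotient by one when the signs
                    # differ and there is a remainder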
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(UpperCamelCase__ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
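    # A minimal usage sketch (editorial addition): the evaluator above computes
    # "2 1 + 3 *" as (2 + 1) * 3 == 9, and "/" truncates toward zero, e.g.
    # "-7 2 /" -> -3 rather than Python's floored -4.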
| 690 | 1 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
UpperCamelCase : Tuple = logging.getLogger(__name__)
@dataclass
class lowerCamelCase__ :
lowerCAmelCase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
@dataclass
class lowerCamelCase__ :
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Evaluation language. Also train language if `train_language` is set to None."""} )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Train language if it is different from the evaluation language."""} )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""} , )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
lowerCAmelCase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def __snake_case ( ) -> List[str]:
"""simple docstring"""
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
A , A , A = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_xnli' , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase__ )
datasets.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
A = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
A = load_dataset(
'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
A = load_dataset(
'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
A = train_dataset.features['label'].names
if training_args.do_eval:
A = load_dataset(
'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
A = eval_dataset.features['label'].names
if training_args.do_predict:
A = load_dataset(
'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
A = predict_dataset.features['label'].names
# Labels
A = len(UpperCamelCase__ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase__ , idalabel={str(UpperCamelCase__ ): label for i, label in enumerate(UpperCamelCase__ )} , labelaid={label: i for i, label in enumerate(UpperCamelCase__ )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
A = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
A = False
def preprocess_function(UpperCamelCase__ ):
# Tokenize the texts
return tokenizer(
examples['premise'] , examples['hypothesis'] , padding=UpperCamelCase__ , max_length=data_args.max_seq_length , truncation=UpperCamelCase__ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
A = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
A = train_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
A = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(UpperCamelCase__ ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
A = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
A = eval_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
A = eval_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
A = min(len(UpperCamelCase__ ) , data_args.max_predict_samples )
A = predict_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc='prediction dataset map pre-processing' ):
A = predict_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , )
# Get the metric function
A = evaluate.load('xnli' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(UpperCamelCase__ ):
A = p.predictions[0] if isinstance(p.predictions , UpperCamelCase__ ) else p.predictions
A = np.argmax(UpperCamelCase__ , axis=1 )
return metric.compute(predictions=UpperCamelCase__ , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
A = default_data_collator
elif training_args.fpaa:
A = DataCollatorWithPadding(UpperCamelCase__ , pad_to_multiple_of=8 )
else:
A = None
# Initialize our Trainer
A = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=UpperCamelCase__ , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , )
# Training
if training_args.do_train:
A = None
if training_args.resume_from_checkpoint is not None:
A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A = last_checkpoint
A = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
A = train_result.metrics
A = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
)
A = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , UpperCamelCase__ )
trainer.save_metrics('train' , UpperCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
A = trainer.evaluate(eval_dataset=UpperCamelCase__ )
A = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
A = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics('eval' , UpperCamelCase__ )
trainer.save_metrics('eval' , UpperCamelCase__ )
# Prediction
if training_args.do_predict:
logger.info('*** Predict ***' )
A , A , A = trainer.predict(UpperCamelCase__ , metric_key_prefix='predict' )
A = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(UpperCamelCase__ )
)
A = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics('predict' , UpperCamelCase__ )
trainer.save_metrics('predict' , UpperCamelCase__ )
A = np.argmax(UpperCamelCase__ , axis=1 )
A = os.path.join(training_args.output_dir , 'predictions.txt' )
if trainer.is_world_process_zero():
with open(UpperCamelCase__ , 'w' ) as writer:
writer.write('index\tprediction\n' )
for index, item in enumerate(UpperCamelCase__ ):
A = label_list[item]
writer.write(f'{index}\t{item}\n' )
if __name__ == "__main__":
main()
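# Example invocation (editorial sketch; the model id and output path are
# placeholders, the flags come from the dataclasses and TrainingArguments
# used above):
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de --train_language en \
#     --do_train --do_eval \
#     --per_device_train_batch_size 32 \
#     --output_dir /tmp/debug_xnli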
| 690 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCamelCase : Any = None
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : str = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCamelCase : Optional[int] = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCamelCase : str = "▁"
# Segments (not really needed)
UpperCamelCase : str = 0
UpperCamelCase : int = 1
UpperCamelCase : List[Any] = 2
UpperCamelCase : Union[str, Any] = 3
UpperCamelCase : Optional[Any] = 4
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = """left"""
lowerCAmelCase = XLNetTokenizer
def __init__( self : Tuple , _lowercase : List[Any]=None , _lowercase : Any=None , _lowercase : int=False , _lowercase : Tuple=True , _lowercase : Union[str, Any]=False , _lowercase : int="<s>" , _lowercase : Optional[int]="</s>" , _lowercase : Dict="<unk>" , _lowercase : Optional[int]="<sep>" , _lowercase : int="<pad>" , _lowercase : Dict="<cls>" , _lowercase : str="<mask>" , _lowercase : List[str]=["<eop>", "<eod>"] , **_lowercase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
vocab_file=_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
A = 3
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = False if not self.vocab_file else True
def __a ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
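# Minimal usage sketch (editorial addition; XLNetTokenizerFast is the public
# transformers name for this class, and "xlnet-base-cased" comes from the
# pretrained maps above):
#   from transformers import XLNetTokenizerFast
#   tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#   tok("Hello world")["input_ids"]   # special tokens (<sep>, <cls>) are appended on the right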
| 690 | 1 |
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class lowerCamelCase__ ( nn.Module ):
def __init__( self : str , _lowercase : int , _lowercase : int , _lowercase : int , _lowercase : int=0.0 , _lowercase : Optional[int] = None , _lowercase : str = "geglu" , _lowercase : Optional[int] = None , _lowercase : bool = False , _lowercase : bool = False , _lowercase : bool = False , _lowercase : bool = False , _lowercase : bool = True , _lowercase : str = "layer_norm" , _lowercase : bool = False , ):
super().__init__()
A = only_cross_attention
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'
f' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A = AdaLayerNorm(_lowercase , _lowercase )
elif self.use_ada_layer_norm_zero:
A = AdaLayerNormZero(_lowercase , _lowercase )
else:
A = nn.LayerNorm(_lowercase , elementwise_affine=_lowercase )
A = Attention(
query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , dropout=_lowercase , bias=_lowercase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=_lowercase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
# the second cross attention block.
A = (
AdaLayerNorm(_lowercase , _lowercase )
if self.use_ada_layer_norm
else nn.LayerNorm(_lowercase , elementwise_affine=_lowercase )
)
A = Attention(
query_dim=_lowercase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=_lowercase , dim_head=_lowercase , dropout=_lowercase , bias=_lowercase , upcast_attention=_lowercase , ) # is self-attn if encoder_hidden_states is none
else:
A = None
A = None
# 3. Feed-forward
A = nn.LayerNorm(_lowercase , elementwise_affine=_lowercase )
A = FeedForward(_lowercase , dropout=_lowercase , activation_fn=_lowercase , final_dropout=_lowercase )
# let chunk size default to None
A = None
A = 0
def __a ( self : Tuple , _lowercase : Optional[int] , _lowercase : int ):
# Sets chunk feed-forward
A = chunk_size
A = dim
def __a ( self : Tuple , _lowercase : torch.FloatTensor , _lowercase : Optional[torch.FloatTensor] = None , _lowercase : Optional[torch.FloatTensor] = None , _lowercase : Optional[torch.FloatTensor] = None , _lowercase : Optional[torch.LongTensor] = None , _lowercase : Dict[str, Any] = None , _lowercase : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
A = self.norma(_lowercase , _lowercase )
elif self.use_ada_layer_norm_zero:
A , A , A , A , A = self.norma(
_lowercase , _lowercase , _lowercase , hidden_dtype=hidden_states.dtype )
else:
A = self.norma(_lowercase )
A = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A = self.attna(
_lowercase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=_lowercase , **_lowercase , )
if self.use_ada_layer_norm_zero:
A = gate_msa.unsqueeze(1 ) * attn_output
A = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A = (
self.norma(_lowercase , _lowercase ) if self.use_ada_layer_norm else self.norma(_lowercase )
)
A = self.attna(
_lowercase , encoder_hidden_states=_lowercase , attention_mask=_lowercase , **_lowercase , )
A = attn_output + hidden_states
# 3. Feed-forward
A = self.norma(_lowercase )
if self.use_ada_layer_norm_zero:
A = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.' )
A = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A = torch.cat(
[self.ff(_lowercase ) for hid_slice in norm_hidden_states.chunk(_lowercase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
A = self.ff(_lowercase )
if self.use_ada_layer_norm_zero:
A = gate_mlp.unsqueeze(1 ) * ff_output
A = ff_output + hidden_states
return hidden_states
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Union[str, Any] , _lowercase : int , _lowercase : Optional[int] = None , _lowercase : int = 4 , _lowercase : float = 0.0 , _lowercase : str = "geglu" , _lowercase : bool = False , ):
super().__init__()
A = int(dim * mult )
A = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
A = GELU(_lowercase , _lowercase )
if activation_fn == "gelu-approximate":
A = GELU(_lowercase , _lowercase , approximate='tanh' )
elif activation_fn == "geglu":
A = GEGLU(_lowercase , _lowercase )
elif activation_fn == "geglu-approximate":
A = ApproximateGELU(_lowercase , _lowercase )
A = nn.ModuleList([] )
# project in
self.net.append(_lowercase )
# project dropout
self.net.append(nn.Dropout(_lowercase ) )
# project out
self.net.append(nn.Linear(_lowercase , _lowercase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(_lowercase ) )
def __a ( self : int , _lowercase : Optional[int] ):
for module in self.net:
A = module(_lowercase )
return hidden_states
class lowerCamelCase__ ( nn.Module ):
def __init__( self : int , _lowercase : int , _lowercase : int , _lowercase : str = "none" ):
super().__init__()
A = nn.Linear(_lowercase , _lowercase )
A = approximate
def __a ( self : Union[str, Any] , _lowercase : Tuple ):
if gate.device.type != "mps":
return F.gelu(_lowercase , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def __a ( self : Dict , _lowercase : Any ):
A = self.proj(_lowercase )
A = self.gelu(_lowercase )
return hidden_states
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Dict , _lowercase : int , _lowercase : int ):
super().__init__()
A = nn.Linear(_lowercase , dim_out * 2 )
def __a ( self : Tuple , _lowercase : Optional[int] ):
if gate.device.type != "mps":
return F.gelu(_lowercase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def __a ( self : Tuple , _lowercase : str ):
A , A = self.proj(_lowercase ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(_lowercase )
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Tuple , _lowercase : int , _lowercase : int ):
super().__init__()
A = nn.Linear(_lowercase , _lowercase )
def __a ( self : Dict , _lowercase : str ):
A = self.proj(_lowercase )
return x * torch.sigmoid(1.7_0_2 * x )
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Any , _lowercase : Any , _lowercase : Tuple ):
super().__init__()
A = nn.Embedding(_lowercase , _lowercase )
A = nn.SiLU()
A = nn.Linear(_lowercase , embedding_dim * 2 )
A = nn.LayerNorm(_lowercase , elementwise_affine=_lowercase )
def __a ( self : List[Any] , _lowercase : int , _lowercase : Any ):
A = self.linear(self.silu(self.emb(_lowercase ) ) )
A , A = torch.chunk(_lowercase , 2 )
A = self.norm(_lowercase ) * (1 + scale) + shift
return x
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Any , _lowercase : Dict , _lowercase : List[Any] ):
super().__init__()
A = CombinedTimestepLabelEmbeddings(_lowercase , _lowercase )
A = nn.SiLU()
A = nn.Linear(_lowercase , 6 * embedding_dim , bias=_lowercase )
A = nn.LayerNorm(_lowercase , elementwise_affine=_lowercase , eps=1e-6 )
def __a ( self : Tuple , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[Any]=None ):
A = self.linear(self.silu(self.emb(_lowercase , _lowercase , hidden_dtype=_lowercase ) ) )
A , A , A , A , A , A = emb.chunk(6 , dim=1 )
A = self.norm(_lowercase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Tuple , _lowercase : int , _lowercase : int , _lowercase : int , _lowercase : Optional[str] = None , _lowercase : float = 1e-5 ):
super().__init__()
A = num_groups
A = eps
if act_fn is None:
A = None
else:
A = get_activation(_lowercase )
A = nn.Linear(_lowercase , out_dim * 2 )
def __a ( self : str , _lowercase : int , _lowercase : List[Any] ):
if self.act:
A = self.act(_lowercase )
A = self.linear(_lowercase )
A = emb[:, :, None, None]
A , A = emb.chunk(2 , dim=1 )
A = F.group_norm(_lowercase , self.num_groups , eps=self.eps )
A = x * (1 + scale) + shift
return x
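# Minimal shape-check sketch (editorial addition; assumes this file mirrors
# diffusers.models.attention, where the first block above is published as
# BasicTransformerBlock):
#   import torch
#   from diffusers.models.attention import BasicTransformerBlock
#   block = BasicTransformerBlock(dim=32, num_attention_heads=2, attention_head_dim=16)
#   out = block(torch.randn(2, 64, 32))   # (batch, sequence, dim) in and out
#   print(out.shape)                      # torch.Size([2, 64, 32])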
| 690 |
"""simple docstring"""
from __future__ import annotations
UpperCamelCase : Any = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
A = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the reference grid
A = 1
A = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the action grid
A = init[0]
A = init[1]
A = 0
A = g + heuristic[x][y] # cost from starting cell to destination cell
A = [[f, g, x, y]]
A = False # flag that is set when search is complete
    A = False # flag set if we can't expand any further
while not found and not resign:
if len(UpperCamelCase__ ) == 0:
raise ValueError('Algorithm is unable to find solution' )
        else: # choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
A = cell.pop()
A = next_cell[2]
A = next_cell[3]
A = next_cell[1]
if x == goal[0] and y == goal[1]:
A = True
else:
for i in range(len(UpperCamelCase__ ) ): # to try out different valid actions
A = x + DIRECTIONS[i][0]
A = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCamelCase__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
A = g + cost
A = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
A = 1
A = i
A = []
A = goal[0]
A = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
A = x - DIRECTIONS[action[x][y]][0]
A = y - DIRECTIONS[action[x][y]][1]
A = xa
A = ya
invpath.append([x, y] )
A = []
for i in range(len(UpperCamelCase__ ) ):
path.append(invpath[len(UpperCamelCase__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
UpperCamelCase : Any = [
[0, 1, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCamelCase : List[Any] = [0, 0]
# all coordinates are given in format [y,x]
UpperCamelCase : int = [len(grid) - 1, len(grid[0]) - 1]
UpperCamelCase : Tuple = 1
# the cost map which pushes the path closer to the goal
UpperCamelCase : Union[str, Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCamelCase : List[str] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCamelCase : Dict = 99
UpperCamelCase , UpperCamelCase : Optional[Any] = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
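# Editorial note: zeroing every entry of `heuristic` reduces the search above
# to uniform-cost (Dijkstra) behaviour, since f then equals the path cost g.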
| 690 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase__ ( UpperCAmelCase_ ):
def __init__( self : Optional[Any] , *_lowercase : int , **_lowercase : List[Any] ):
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
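# Editorial note: instantiating the shim above only emits the deprecation
# warning and defers to the parent class; new code should import the
# replacement directly:
#   from transformers import ChineseCLIPImageProcessor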
| 690 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : int = {"vocab_file": "sentencepiece.model"}
UpperCamelCase : Union[str, Any] = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
UpperCamelCase : Union[str, Any] = {
"google/rembert": 256,
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Dict=True , _lowercase : List[str]=True , _lowercase : int="[CLS]" , _lowercase : str="[SEP]" , _lowercase : List[str]="[UNK]" , _lowercase : List[Any]="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : List[str]="[CLS]" , _lowercase : Any="[MASK]" , **_lowercase : Optional[Any] , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def __a ( self : Tuple ):
return len(self.sp_model )
def __a ( self : List[str] ):
A = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : List[str] , _lowercase : int ):
A = d
A = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __a ( self : Dict , _lowercase : Union[str, Any] , _lowercase : Dict=False ):
A = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def __a ( self : Dict , _lowercase : Tuple ):
return self.sp_model.PieceToId(_lowercase )
def __a ( self : str , _lowercase : Optional[int] ):
return self.sp_model.IdToPiece(_lowercase )
def __a ( self : Optional[int] , _lowercase : Optional[int] ):
A = self.sp_model.decode_pieces(_lowercase )
return out_string
def __a ( self : Optional[int] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def __a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('Vocabulary path ({}) should be a directory'.format(_lowercase ) )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
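# Minimal usage sketch (editorial addition; RemBertTokenizer is the public
# transformers name for this class, and "google/rembert" is listed in the
# vocab map above):
#   from transformers import RemBertTokenizer
#   tok = RemBertTokenizer.from_pretrained("google/rembert")
#   tok.build_inputs_with_special_tokens([5, 6])   # -> [CLS] 5 6 [SEP] ids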
| 690 | 1 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = StableUnCLIPPipeline
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
lowerCAmelCase = False
def __a ( self : List[str] ):
A = 32
A = embedder_hidden_size
# prior components
torch.manual_seed(0 )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
A = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
A = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
A = DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_000 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0 )
A = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
A = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
A = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
A = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
A = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='v_prediction' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
A = AutoencoderKL()
A = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
def __a ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Tuple=0 ):
if str(_lowercase ).startswith('mps' ):
A = torch.manual_seed(_lowercase )
else:
A = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __a ( self : str ):
A = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def __a ( self : Optional[int] ):
A = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : str ):
A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
A = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A = torch.Generator(device='cpu' ).manual_seed(0 )
        A = pipe('anime turtle' , generator=_lowercase , output_type='np' )
A = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
def __a ( self : Tuple ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
A = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
A = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
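        # Editorial note: both integration tests above are gated by @slow and
        # @require_torch_gpu; a local run sketch (upstream test path assumed):
        #   RUN_SLOW=1 python -m pytest tests/pipelines/stable_unclip/ -v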
| 690 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : List[Any] = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
UpperCamelCase : Any = {"mobilebert-uncased": 512}
UpperCamelCase : Any = {}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = MobileBertTokenizer
def __init__( self : Optional[int] , _lowercase : Optional[int]=None , _lowercase : Any=None , _lowercase : Optional[int]=True , _lowercase : int="[UNK]" , _lowercase : Dict="[SEP]" , _lowercase : Any="[PAD]" , _lowercase : str="[CLS]" , _lowercase : Union[str, Any]="[MASK]" , _lowercase : List[Any]=True , _lowercase : Any=None , **_lowercase : Optional[Any] , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
A = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowercase ) != tokenize_chinese_chars
):
A = getattr(_lowercase , normalizer_state.pop('type' ) )
A = do_lower_case
A = strip_accents
A = tokenize_chinese_chars
A = normalizer_class(**_lowercase )
A = do_lower_case
def __a ( self : List[Any] , _lowercase : Tuple , _lowercase : Any=None ):
A = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Dict , _lowercase : str , _lowercase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
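# Minimal usage sketch (editorial addition; MobileBertTokenizerFast is the
# public transformers name for this class, checkpoint taken from the maps
# above):
#   from transformers import MobileBertTokenizerFast
#   tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   tok("hello world")["input_ids"]   # [CLS] ... [SEP] ids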
| 690 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : int = {"vocab_file": "sentencepiece.model"}
UpperCamelCase : Union[str, Any] = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
UpperCamelCase : Union[str, Any] = {
"google/rembert": 256,
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Dict=True , _lowercase : List[str]=True , _lowercase : int="[CLS]" , _lowercase : str="[SEP]" , _lowercase : List[str]="[UNK]" , _lowercase : List[Any]="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : List[str]="[CLS]" , _lowercase : Any="[MASK]" , **_lowercase : Optional[Any] , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def __a ( self : Tuple ):
return len(self.sp_model )
def __a ( self : List[str] ):
A = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : List[str] , _lowercase : int ):
A = d
A = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __a ( self : Dict , _lowercase : Union[str, Any] , _lowercase : Dict=False ):
A = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def __a ( self : Dict , _lowercase : Tuple ):
return self.sp_model.PieceToId(_lowercase )
def __a ( self : str , _lowercase : Optional[int] ):
return self.sp_model.IdToPiece(_lowercase )
def __a ( self : Optional[int] , _lowercase : Optional[int] ):
A = self.sp_model.decode_pieces(_lowercase )
return out_string
def __a ( self : Optional[int] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def __a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('Vocabulary path ({}) should be a directory'.format(_lowercase ) )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 690 |
"""simple docstring"""
def __snake_case ( UpperCamelCase__ ) -> list[int]:
"""simple docstring"""
A = [0 for i in range(len(UpperCamelCase__ ) )]
# initialize interval's left pointer and right pointer
A , A = 0, 0
for i in range(1 , len(UpperCamelCase__ ) ):
# case when current index is inside the interval
if i <= right_pointer:
A = min(right_pointer - i + 1 , z_result[i - left_pointer] )
A = min_edge
while go_next(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
z_result[i] += 1
# if new index's result gives us more right interval,
        # we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
A , A = i, i + z_result[i] - 1
return z_result
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> bool:
"""simple docstring"""
return i + z_result[i] < len(UpperCamelCase__ ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
A = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
A = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(UpperCamelCase__ ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
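    # A minimal usage sketch (editorial addition), written against the
    # functions' original upstream names as referenced in the bodies above:
    #   z_function("aaab")          -> [0, 2, 1, 0]
    #   find_pattern("ab", "abcab") -> 2   (the pattern occurs twice)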
| 690 | 1 |
"""simple docstring"""
import numpy as np
UpperCamelCase : List[Any] = [
["a", "b", "c", "d", "e"],
["f", "g", "h", "i", "k"],
["l", "m", "n", "o", "p"],
["q", "r", "s", "t", "u"],
["v", "w", "x", "y", "z"],
]
class lowerCamelCase__ :
def __init__( self : Any ):
A = np.array(_lowercase )
def __a ( self : Optional[Any] , _lowercase : str ):
A , A = np.where(letter == self.SQUARE )
A = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __a ( self : Any , _lowercase : int , _lowercase : int ):
A = self.SQUARE[indexa - 1, indexa - 1]
return letter
def __a ( self : Optional[Any] , _lowercase : str ):
A = message.lower()
A = message.replace(' ' , '' )
A = message.replace('j' , 'i' )
A = np.empty((2, len(_lowercase )) )
for letter_index in range(len(_lowercase ) ):
A = self.letter_to_numbers(message[letter_index] )
A = numbers[0]
A = numbers[1]
A = first_step.reshape(2 * len(_lowercase ) )
A = ''
for numbers_index in range(len(_lowercase ) ):
A = int(second_step[numbers_index * 2] )
A = int(second_step[(numbers_index * 2) + 1] )
A = self.numbers_to_letter(_lowercase , _lowercase )
A = encoded_message + letter
return encoded_message
def __a ( self : Dict , _lowercase : str ):
A = message.lower()
        A = message.replace(' ' , '' )  # strip spaces, mirroring encode()
A = np.empty(2 * len(_lowercase ) )
for letter_index in range(len(_lowercase ) ):
A = self.letter_to_numbers(message[letter_index] )
A = numbers[0]
A = numbers[1]
A = first_step.reshape((2, len(_lowercase )) )
A = ''
for numbers_index in range(len(_lowercase ) ):
A = int(second_step[0, numbers_index] )
A = int(second_step[1, numbers_index] )
A = self.numbers_to_letter(_lowercase , _lowercase )
A = decoded_message + letter
return decoded_message
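# Minimal round-trip sketch (editorial addition; PolybiusCipher is the class's
# original upstream name, and encode() strips spaces before mapping letters):
#   cipher = PolybiusCipher()
#   secret = cipher.encode("test message")
#   cipher.decode(secret)   # -> "testmessage"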
| 690 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = LDMTextToImagePipeline
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase = False
def __a ( self : Dict ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
A = CLIPTextModel(_lowercase )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __a ( self : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any]=0 ):
if str(_lowercase ).startswith('mps' ):
A = torch.manual_seed(_lowercase )
else:
A = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Any ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = LDMTextToImagePipeline(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_dummy_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
A = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : int , _lowercase : List[Any] , _lowercase : int=torch.floataa , _lowercase : int=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Union[str, Any] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
A = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
A = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Tuple=torch.floataa , _lowercase : Optional[Any]=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : List[str] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images[0]
A = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
A = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 690 | 1 |
"""simple docstring"""
class lowerCamelCase__ :
def __init__( self : Tuple , _lowercase : list ):
A = set_counts
A = max(_lowercase )
A = len(_lowercase )
A = [1] * num_sets
A = list(range(_lowercase ) )
def __a ( self : Optional[int] , _lowercase : int , _lowercase : int ):
A = self.get_parent(_lowercase )
A = self.get_parent(_lowercase )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
A = 0
A = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
A = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
A = 0
A = src_parent
A = self.set_counts[src_parent]
A = max(self.max_set , _lowercase )
return True
def __a ( self : List[str] , _lowercase : int ):
if self.parents[disj_set] == disj_set:
return disj_set
A = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
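# Usage sketch (hedged): the two methods above are both rendered as `__a` and
# stand for merge(src, dst) and get_parent(disj_set). With those names restored:
#   sets = lowerCamelCase__([1, 1, 1])  # three singleton sets of size 1
#   sets.merge(1, 2)                    # union by rank; max_set becomes 2
#   sets.merge(0, 2)                    # max_set becomes 3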
| 690 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
A = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase , cache_dir=_lowercase )
A = [t[-1] for t in os.walk(os.path.join(_lowercase , os.listdir(_lowercase )[0] , 'snapshots' ) )]
A = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 4
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(_lowercase , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
A = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_lowercase ) == num_samples
def __a ( self : Dict ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def __a ( self : List[str] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : str ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : Any ):
A = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , set_alpha_to_one=_lowercase , steps_offset=1 , )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_lowercase , safety_checker=_lowercase , )
A = scheduler.create_state()
A = scheduler_state
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def __a ( self : List[str] ):
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.device_count()
A = num_samples * [prompt]
A = jax.random.split(jax.random.PRNGKey(0 ) , _lowercase )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , use_memory_efficient_attention=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        A = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 690 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase : Dict = logging.get_logger(__name__)
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : str , _lowercase : bool = True , _lowercase : Optional[Dict[str, int]] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : Any , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Any , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Dict , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
A = get_size_dict(_lowercase )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : Dict , _lowercase : np.ndarray , _lowercase : float , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Dict ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[Any] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : int , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : ImageInput , _lowercase : Optional[bool] = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[float] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_lowercase : Any , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
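# Usage sketch (hedged): with the defaults above the processor resizes the
# shortest edge to 256, centre-crops to 224x224, rescales by 1/255 and
# normalises with the ImageNet mean/std, so for a single RGB PIL image:
#   processor = lowerCamelCase__()
#   batch = processor(images=[pil_image], return_tensors='pt')  # pil_image is hypothetical
#   batch['pixel_values'].shape  # -> torch.Size([1, 3, 224, 224])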
| 690 |
"""simple docstring"""
import os
import sys
UpperCamelCase : Optional[int] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
UpperCamelCase : Dict = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
"""simple docstring"""
return AutoConfig.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModel.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
"""simple docstring"""
return AutoModel.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
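# Usage sketch (hedged): every wrapper above is rendered as `__snake_case`;
# each one simply forwards its arguments to the matching Auto class, so e.g.
# the tokenizer wrapper is equivalent to calling
#   AutoTokenizer.from_pretrained('bert-base-uncased')
# directly.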
| 690 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=UpperCAmelCase_ ):
lowerCAmelCase = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Optional[Any] , *_lowercase : str , **_lowercase : Union[str, Any] ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __a ( cls : Dict , *_lowercase : List[Any] , **_lowercase : Tuple ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __a ( cls : str , *_lowercase : List[Any] , **_lowercase : List[Any] ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class lowerCamelCase__ ( metaclass=UpperCAmelCase_ ):
lowerCAmelCase = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Tuple , *_lowercase : str , **_lowercase : str ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __a ( cls : Union[str, Any] , *_lowercase : Union[str, Any] , **_lowercase : List[str] ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __a ( cls : List[Any] , *_lowercase : Union[str, Any] , **_lowercase : Any ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class lowerCamelCase__ ( metaclass=UpperCAmelCase_ ):
lowerCAmelCase = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Union[str, Any] , *_lowercase : str , **_lowercase : Any ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __a ( cls : List[str] , *_lowercase : str , **_lowercase : Any ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __a ( cls : Optional[Any] , *_lowercase : str , **_lowercase : str ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class lowerCamelCase__ ( metaclass=UpperCAmelCase_ ):
lowerCAmelCase = ["""torch""", """transformers""", """onnx"""]
def __init__( self : str , *_lowercase : Optional[Any] , **_lowercase : Optional[Any] ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __a ( cls : str , *_lowercase : Tuple , **_lowercase : Union[str, Any] ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __a ( cls : Tuple , *_lowercase : List[str] , **_lowercase : List[Any] ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class lowerCamelCase__ ( metaclass=UpperCAmelCase_ ):
lowerCAmelCase = ["""torch""", """transformers""", """onnx"""]
def __init__( self : List[Any] , *_lowercase : int , **_lowercase : int ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __a ( cls : Any , *_lowercase : Optional[int] , **_lowercase : int ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __a ( cls : Optional[Any] , *_lowercase : Optional[int] , **_lowercase : Tuple ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class lowerCamelCase__ ( metaclass=UpperCAmelCase_ ):
lowerCAmelCase = ["""torch""", """transformers""", """onnx"""]
def __init__( self : int , *_lowercase : Any , **_lowercase : Optional[Any] ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __a ( cls : Optional[Any] , *_lowercase : Optional[int] , **_lowercase : Any ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __a ( cls : Optional[int] , *_lowercase : Any , **_lowercase : Optional[Any] ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 690 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : List[str] = logging.get_logger(__name__)
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : Tuple , _lowercase : bool = True , _lowercase : Optional[Dict[str, int]] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[str] , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Any , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[Any] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : float , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Any , _lowercase : ImageInput , _lowercase : Optional[bool] = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[float] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_lowercase : Any , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def __a ( self : int , _lowercase : List[str] , _lowercase : List[Tuple] = None ):
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_lowercase ):
A = target_sizes.numpy()
A = []
for idx in range(len(_lowercase ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_lowercase )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
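# Usage sketch (hedged): the last method above is the semantic-segmentation
# post-processing step (a hypothetical de-obfuscated name would be
# post_process_semantic_segmentation). Given model outputs and target sizes:
#   maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[(500, 400)])
#   maps[0].shape  # -> torch.Size([500, 400]); each entry holds per-pixel class ids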
| 690 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __a ( self : int ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
A = 'A painting of a squirrel eating a burger'
A = jax.device_count()
A = num_samples * [prompt]
A = sd_pipe.prepare_inputs(_lowercase )
A = replicate(_lowercase )
A = shard(_lowercase )
A = jax.random.PRNGKey(0 )
A = jax.random.split(_lowercase , jax.device_count() )
A = sd_pipe(_lowercase , _lowercase , _lowercase , num_inference_steps=25 , jit=_lowercase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
A = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
A = images[0, 253:256, 253:256, -1]
A = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def __a ( self : Dict ):
A = 'stabilityai/stable-diffusion-2'
A , A = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowercase , subfolder='scheduler' )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
_lowercase , scheduler=_lowercase , revision='bf16' , dtype=jnp.bfloataa , )
A = scheduler_params
A = 'A painting of a squirrel eating a burger'
A = jax.device_count()
A = num_samples * [prompt]
A = sd_pipe.prepare_inputs(_lowercase )
A = replicate(_lowercase )
A = shard(_lowercase )
A = jax.random.PRNGKey(0 )
A = jax.random.split(_lowercase , jax.device_count() )
A = sd_pipe(_lowercase , _lowercase , _lowercase , num_inference_steps=25 , jit=_lowercase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
A = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
A = images[0, 253:256, 253:256, -1]
A = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 690 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def __snake_case ( UpperCamelCase__ = "laptop" ) -> DataFrame:
"""simple docstring"""
A = f'https://www.amazon.in/laptop/s?k={product}'
A = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
A = BeautifulSoup(requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).text )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
A = item.ha.text
A = 'https://www.amazon.in/' + item.ha.a['href']
A = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
A = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
A = 'Not available'
try:
A = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
A = ''
try:
A = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
A = float('nan' )
except AttributeError:
pass
A = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = ' '
A = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCamelCase : Any = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 690 | 1 |
"""simple docstring"""
from __future__ import annotations
UpperCamelCase : Any = 8.988E9 # units = N * m^2 * C^-2
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> dict[str, float]:
"""simple docstring"""
A = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if distance < 0:
raise ValueError('Distance cannot be negative' )
if force == 0:
A = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
A = abs(UpperCamelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
A = abs(UpperCamelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
A = (COULOMBS_CONSTANT * charge_product / abs(UpperCamelCase__ )) ** 0.5
return {"distance": distance}
raise ValueError('Exactly one argument must be 0' )
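# Usage sketch (hedged): the four positional parameters above are, in order,
# force, charge1, charge2 and distance; exactly one of them must be 0 and is
# solved for. Two unit charges one metre apart feel a force equal to
# Coulomb's constant itself:
#   __snake_case(0, 1, 1, 1)  # -> {'force': 8.988e9}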
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self : List[str] , _lowercase : Optional[Any] , _lowercase : int=7 , _lowercase : List[str]=3 , _lowercase : Tuple=18 , _lowercase : Dict=30 , _lowercase : Any=400 , _lowercase : int=True , _lowercase : List[Any]=None , _lowercase : Tuple=True , _lowercase : List[Any]=False , _lowercase : str=True , _lowercase : List[str]=True , _lowercase : int=[0.5, 0.5, 0.5] , _lowercase : Optional[int]=[0.5, 0.5, 0.5] , ):
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size if size is not None else {'height': 18, 'width': 20}
A = do_thumbnail
A = do_align_axis
A = do_pad
A = do_normalize
A = image_mean
A = image_std
def __a ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
A = DonutImageProcessingTester(self )
@property
def __a ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Union[str, Any] ):
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , 'do_resize' ) )
self.assertTrue(hasattr(_lowercase , 'size' ) )
self.assertTrue(hasattr(_lowercase , 'do_thumbnail' ) )
self.assertTrue(hasattr(_lowercase , 'do_align_long_axis' ) )
self.assertTrue(hasattr(_lowercase , 'do_pad' ) )
self.assertTrue(hasattr(_lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowercase , 'image_mean' ) )
self.assertTrue(hasattr(_lowercase , 'image_std' ) )
def __a ( self : int ):
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
A = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def __a ( self : Any ):
pass
@is_flaky()
def __a ( self : int ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[str] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[Any] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 690 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> list[float]:
"""simple docstring"""
A , A = coefficient_matrix.shape
A , A = constant_matrix.shape
if rowsa != colsa:
A = f'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
raise ValueError(UpperCamelCase__ )
if colsa != 1:
A = f'Constant matrix must be nx1 but received {rowsa}x{colsa}'
raise ValueError(UpperCamelCase__ )
if rowsa != rowsa:
A = (
'Coefficient and constant matrices dimensions must be nxn and nx1 but '
f'received {rowsa}x{colsa} and {rowsa}x{colsa}'
)
raise ValueError(UpperCamelCase__ )
if len(UpperCamelCase__ ) != rowsa:
A = (
'Number of initial values must be equal to number of rows in coefficient '
f'matrix but received {len(UpperCamelCase__ )} and {rowsa}'
)
raise ValueError(UpperCamelCase__ )
if iterations <= 0:
raise ValueError('Iterations must be at least 1' )
A = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
A , A = table.shape
strictly_diagonally_dominant(UpperCamelCase__ )
# Iterates the whole matrix for given number of times
for _ in range(UpperCamelCase__ ):
A = []
for row in range(UpperCamelCase__ ):
A = 0
for col in range(UpperCamelCase__ ):
if col == row:
A = table[row][col]
elif col == cols - 1:
A = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
A = (temp + val) / denom
new_val.append(UpperCamelCase__ )
A = new_val
return [float(UpperCamelCase__ ) for i in new_val]
def __snake_case ( UpperCamelCase__ ) -> bool:
"""simple docstring"""
A , A = table.shape
A = True
for i in range(0 , UpperCamelCase__ ):
A = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
return is_diagonally_dominant
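# Usage sketch (hedged): a strictly diagonally dominant 3x3 system, iterated a
# few times from a rough initial guess (arguments are coefficient matrix,
# constant matrix, initial values and iteration count, in that order):
#   coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
#   constant = np.array([[2], [-6], [-4]])
#   __snake_case(coefficient, constant, [0.5, -0.5, -0.5], 3)
#   # each pass refines the estimate of the solution vector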
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase__ :
def __init__( self : Optional[Any] , _lowercase : int=2 , _lowercase : Optional[Any]=3 , _lowercase : Any=64 , _lowercase : Tuple=None ):
A = np.random.default_rng(_lowercase )
A = length
A = rng.normal(size=(length,) ).astype(np.floataa )
A = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : str ):
return self.length
def __getitem__( self : List[str] , _lowercase : int ):
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase__ ( torch.nn.Module ):
def __init__( self : Optional[int] , _lowercase : Any=0 , _lowercase : List[Any]=0 , _lowercase : Optional[int]=False ):
super().__init__()
A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A = True
def __a ( self : Optional[Any] , _lowercase : str=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
A = False
return x * self.a[0] + self.b[0]
class lowerCamelCase__ ( torch.nn.Module ):
def __init__( self : Optional[Any] , _lowercase : Any=0 , _lowercase : List[str]=0 , _lowercase : str=False ):
super().__init__()
A = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
A = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
A = True
def __a ( self : int , _lowercase : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
A = False
return x * self.a + self.b
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ = 16 ) -> Optional[Any]:
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
A = AutoTokenizer.from_pretrained('bert-base-cased' )
A = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
A = load_dataset('csv' , data_files=UpperCamelCase__ )
A = datasets['train'].unique('label' )
A = {v: i for i, v in enumerate(UpperCamelCase__ )}
def tokenize_function(UpperCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
A = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' )
if "label" in examples:
A = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(UpperCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(UpperCamelCase__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
A = DataLoader(tokenized_datasets['train'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=2 )
A = DataLoader(tokenized_datasets['validation'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=1 )
return train_dataloader, eval_dataloader
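# Usage sketch (hedged): the first positional parameter above is an
# `accelerate.Accelerator` (it is referenced as `accelerator` inside
# collate_fn for the TPU padding branch):
#   from accelerate import Accelerator
#   train_dl, eval_dl = __snake_case(Accelerator())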
| 690 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : List[Any] = logging.get_logger(__name__)
def __snake_case ( UpperCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
A = ASTConfig()
if "10-10" in model_name:
        pass  # the default ASTConfig already matches the 10-10 checkpoints
elif "speech-commands" in model_name:
A = 128
elif "12-12" in model_name:
A = 12
A = 12
elif "14-14" in model_name:
A = 14
A = 14
elif "16-16" in model_name:
A = 16
A = 16
else:
raise ValueError('Model not supported' )
A = 'huggingface/label-files'
if "speech-commands" in model_name:
A = 35
A = 'speech-commands-v2-id2label.json'
else:
A = 527
A = 'audioset-id2label.json'
A = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
A = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
return config
def __snake_case ( UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
if "module.v" in name:
A = name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
A = name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
A = name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
A = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
A = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
A = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A = name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
A = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
A = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
A = name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
A = orig_state_dict.pop(UpperCamelCase__ )
if "qkv" in key:
A = key.split('.' )
A = int(key_split[3] )
A = config.hidden_size
if "weight" in key:
A = val[:dim, :]
A = val[dim : dim * 2, :]
A = val[-dim:, :]
else:
A = val[:dim]
A = val[dim : dim * 2]
A = val[-dim:]
else:
A = val
return orig_state_dict
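# Shape sketch (hedged): each fused `qkv.weight` of shape (3*dim, dim) is cut
# into equal query/key/value thirds along dim 0 above; the matching `qkv.bias`
# of shape (3*dim,) is split the same way.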
def __snake_case ( UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
A = [
'module.v.head.weight',
'module.v.head.bias',
'module.v.head_dist.weight',
'module.v.head_dist.bias',
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
@torch.no_grad()
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> Any:
"""simple docstring"""
A = get_audio_spectrogram_transformer_config(UpperCamelCase__ )
A = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
A = model_name_to_url[model_name]
A = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='cpu' )
# remove some keys
remove_keys(UpperCamelCase__ )
# rename some keys
A = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
# load 🤗 model
A = ASTForAudioClassification(UpperCamelCase__ )
model.eval()
model.load_state_dict(UpperCamelCase__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
A = -4.2_6_7_7_3_9_3 if 'speech-commands' not in model_name else -6.8_4_5_9_7_8
A = 4.5_6_8_9_9_7_4 if 'speech-commands' not in model_name else 5.5_6_5_4_5_2_6
A = 1024 if 'speech-commands' not in model_name else 128
A = ASTFeatureExtractor(mean=UpperCamelCase__ , std=UpperCamelCase__ , max_length=UpperCamelCase__ )
if "speech-commands" in model_name:
A = load_dataset('speech_commands' , 'v0.02' , split='validation' )
A = dataset[0]['audio']['array']
else:
A = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
A , A = torchaudio.load(UpperCamelCase__ )
A = waveform.squeeze().numpy()
A = feature_extractor(UpperCamelCase__ , sampling_rate=16000 , return_tensors='pt' )
# forward pass
A = model(**UpperCamelCase__ )
A = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
A = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
A = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
A = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
A = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
A = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
A = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
A = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] )
elif model_name == "ast-finetuned-speech-commands-v2":
A = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase__ )
print(f'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(f'MIT/{model_name}' )
feature_extractor.push_to_hub(f'MIT/{model_name}' )
if __name__ == "__main__":
UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase : List[Any] = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 690 |
"""simple docstring"""
from __future__ import annotations
def __snake_case ( UpperCamelCase__ ) -> list[int]: # This function is recursive
"""simple docstring"""
A = len(UpperCamelCase__ )
    # If the array contains only one element, we return it (this is the stop
    # condition of the recursion)
if array_length <= 1:
return array
# Else
A = array[0]
A = False
A = 1
A = []
while not is_found and i < array_length:
if array[i] < pivot:
A = True
A = [element for element in array[i:] if element >= array[i]]
A = longest_subsequence(UpperCamelCase__ )
if len(UpperCamelCase__ ) > len(UpperCamelCase__ ):
A = temp_array
else:
i += 1
A = [element for element in array[1:] if element >= pivot]
A = [pivot, *longest_subsequence(UpperCamelCase__ )]
if len(UpperCamelCase__ ) > len(UpperCamelCase__ ):
return temp_array
else:
return longest_subseq
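# Usage sketch (hedged): the function returns a longest non-decreasing
# subsequence of its input, so an already-sorted list comes back unchanged:
#   __snake_case([1, 2, 3])  # -> [1, 2, 3]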
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 | 1 |
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def __snake_case ( UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def __snake_case ( ) -> Any:
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def __snake_case ( ) -> str:
"""simple docstring"""
A = 'mock-s3-bucket'
A = f's3://{mock_bucket}'
A = extract_path_from_uri(UpperCamelCase__ )
assert dataset_path.startswith('s3://' ) is False
A = './local/path'
A = extract_path_from_uri(UpperCamelCase__ )
assert dataset_path == new_dataset_path
def __snake_case ( UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
A = is_remote_filesystem(UpperCamelCase__ )
assert is_remote is True
A = fsspec.filesystem('file' )
A = is_remote_filesystem(UpperCamelCase__ )
assert is_remote is False
@pytest.mark.parametrize('compression_fs_class' , UpperCamelCase__ )
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
A = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bza_file, 'lz4': lza_file}
A = input_paths[compression_fs_class.protocol]
if input_path is None:
A = f'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(UpperCamelCase__ )
A = fsspec.filesystem(compression_fs_class.protocol , fo=UpperCamelCase__ )
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
A = os.path.basename(UpperCamelCase__ )
A = expected_filename[: expected_filename.rindex('.' )]
assert fs.glob('*' ) == [expected_filename]
with fs.open(UpperCamelCase__ , 'r' , encoding='utf-8' ) as f, open(UpperCamelCase__ , encoding='utf-8' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('protocol' , ['zip', 'gzip'] )
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
A = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path}
A = compressed_file_paths[protocol]
A = 'dataset.jsonl'
A = f'{protocol}://{member_file_path}::{compressed_file_path}'
A , *A = fsspec.get_fs_token_paths(UpperCamelCase__ )
assert fs.isfile(UpperCamelCase__ )
assert not fs.isfile('non_existing_' + member_file_path )
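# A minimal sketch, not part of the test suite: the chained-URL syntax exercised
# above composes filesystems right-to-left, so 'dataset.jsonl' stored inside
# '/tmp/archive.zip' is addressed as below; both paths are illustrative.
def _example_chained_url():  # hypothetical helper, for illustration only
    import fsspec
    fs, _, paths = fsspec.get_fs_token_paths('zip://dataset.jsonl::/tmp/archive.zip')
    return fs.isfile(paths[0])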
@pytest.mark.integration
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
A = hf_api.dataset_info(UpperCamelCase__ , token=UpperCamelCase__ )
A = HfFileSystem(repo_info=UpperCamelCase__ , token=UpperCamelCase__ )
assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"]
assert hffs.isdir('data' )
assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' )
with open(UpperCamelCase__ ) as f:
assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read()
def __snake_case ( ) -> Tuple:
"""simple docstring"""
A = 'bz2'
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(UpperCamelCase__ , UpperCamelCase__ , clobber=UpperCamelCase__ )
with pytest.warns(UpperCamelCase__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(UpperCamelCase__ ) == 1
assert (
str(warning_info[0].message )
== f'A filesystem protocol was already set for {protocol} and will be overwritten.'
)
| 690 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
UpperCamelCase : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
UpperCamelCase : Optional[int] = typing.Union[np.floataa, int, float] # noqa: UP007
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> VectorOut:
"""simple docstring"""
return np.sqrt(np.sum((np.asarray(UpperCamelCase__ ) - np.asarray(UpperCamelCase__ )) ** 2 ) )
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> VectorOut:
"""simple docstring"""
return sum((va - va) ** 2 for va, va in zip(UpperCamelCase__ , UpperCamelCase__ ) ) ** (1 / 2)
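# A direct check of the formula, not in the original: on the 3-4-5 right triangle
# both helpers above should return 5.0. This standalone version is runnable as-is.
def _example_distance_check():  # hypothetical helper, for illustration only
    import numpy as np
    assert np.sqrt(np.sum((np.asarray([3, 4]) - np.asarray([0, 0])) ** 2)) == 5.0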
if __name__ == "__main__":
def __snake_case ( ) -> None:
"""simple docstring"""
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
| 690 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self : str , _lowercase : Optional[int] , _lowercase : Optional[int]=7 , _lowercase : Optional[int]=3 , _lowercase : Tuple=18 , _lowercase : List[Any]=30 , _lowercase : Optional[int]=400 , _lowercase : List[str]=True , _lowercase : List[str]=None , _lowercase : Union[str, Any]=True , ):
A = size if size is not None else {'height': 18, 'width': 18}
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size
A = apply_ocr
def __a ( self : List[str] ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __a ( self : Union[str, Any] ):
A = LayoutLMvaImageProcessingTester(self )
@property
def __a ( self : Tuple ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : str ):
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , 'do_resize' ) )
self.assertTrue(hasattr(_lowercase , 'size' ) )
self.assertTrue(hasattr(_lowercase , 'apply_ocr' ) )
def __a ( self : Tuple ):
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __a ( self : Union[str, Any] ):
pass
def __a ( self : Optional[Any] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _lowercase )
self.assertIsInstance(encoding.boxes , _lowercase )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __a ( self : List[Any] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __a ( self : Optional[int] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __a ( self : Any ):
# with apply_OCR = True
A = LayoutLMvaImageProcessor()
from datasets import load_dataset
A = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
A = Image.open(ds[0]['file'] ).convert('RGB' )
A = image_processing(_lowercase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
A = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _lowercase )
self.assertListEqual(encoding.boxes , _lowercase )
# with apply_OCR = False
A = LayoutLMvaImageProcessor(apply_ocr=_lowercase )
A = image_processing(_lowercase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
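# A minimal standalone sketch, not part of the test suite, of the processor
# exercised above. With apply_ocr=False no Tesseract install is needed and only
# pixel_values are produced; 'document.png' is a hypothetical input file.
def _example_preprocess_document():  # hypothetical helper, for illustration only
    from PIL import Image
    processor = LayoutLMvaImageProcessor(apply_ocr=False)
    image = Image.open('document.png').convert('RGB')
    return processor(image, return_tensors='pt').pixel_values  # shape (1, 3, 224, 224)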
| 690 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
UpperCamelCase : List[Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False , ) -> Any:
"""simple docstring"""
output_path.parent.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , enable_onnx_checker=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
else:
export(
UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
@torch.no_grad()
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> str:
"""simple docstring"""
    A = torch.float16 if fpaa else torch.float32
if fpaa and torch.cuda.is_available():
A = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
A = 'cpu'
A = StableDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=UpperCamelCase__ ).to(UpperCamelCase__ )
A = Path(UpperCamelCase__ )
# TEXT ENCODER
A = pipeline.text_encoder.config.max_position_embeddings
A = pipeline.text_encoder.config.hidden_size
A = pipeline.tokenizer(
'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors='pt' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=UpperCamelCase__ , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'sequence'},
} , opset=UpperCamelCase__ , )
del pipeline.text_encoder
# UNET
A = pipeline.unet.config.in_channels
A = pipeline.unet.config.sample_size
A = output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=UpperCamelCase__ , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'timestep': {0: 'batch'},
'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
} , opset=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , )
A = str(unet_path.absolute().as_posix() )
A = os.path.dirname(UpperCamelCase__ )
A = onnx.load(UpperCamelCase__ )
# clean up existing tensor files
shutil.rmtree(UpperCamelCase__ )
os.mkdir(UpperCamelCase__ )
# collate external tensor files into one
onnx.save_model(
UpperCamelCase__ , UpperCamelCase__ , save_as_external_data=UpperCamelCase__ , all_tensors_to_one_file=UpperCamelCase__ , location='weights.pb' , convert_attribute=UpperCamelCase__ , )
del pipeline.unet
# VAE ENCODER
A = pipeline.vae
A = vae_encoder.config.in_channels
A = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
A = lambda UpperCamelCase__ , UpperCamelCase__ : vae_encoder.encode(UpperCamelCase__ , UpperCamelCase__ )[0].sample()
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=UpperCamelCase__ , )
# VAE DECODER
A = pipeline.vae
A = vae_decoder.config.latent_channels
A = vae_decoder.config.out_channels
# forward only through the decoder part
    A = vae_decoder.decode
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=UpperCamelCase__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
A = pipeline.safety_checker
A = safety_checker.config.vision_config.num_channels
A = safety_checker.config.vision_config.image_size
A = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
} , opset=UpperCamelCase__ , )
del pipeline.safety_checker
A = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
A = pipeline.feature_extractor
else:
A = None
A = None
A = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(UpperCamelCase__ )
print('ONNX pipeline saved to' , UpperCamelCase__ )
del pipeline
del onnx_pipeline
A = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase__ , provider='CPUExecutionProvider' )
print('ONNX pipeline is loadable' )
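# A minimal inference sketch, not part of the original script: running the
# exported pipeline from the output directory once conversion has finished.
def _example_run_onnx_pipeline(output_path):  # hypothetical helper, for illustration only
    from diffusers import OnnxStableDiffusionPipeline
    pipe = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider='CPUExecutionProvider')
    return pipe('a photo of an astronaut riding a horse').images[0]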
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
UpperCamelCase : str = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
| 690 | 1 |
"""simple docstring"""
from __future__ import annotations
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None ) -> None:
"""simple docstring"""
if start is None:
A = 0
if end is None:
A = len(UpperCamelCase__ ) - 1
if start >= end:
return
A = (start + end) // 2
slowsort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
slowsort(UpperCamelCase__ , mid + 1 , UpperCamelCase__ )
if sequence[end] < sequence[mid]:
A , A = sequence[mid], sequence[end]
slowsort(UpperCamelCase__ , UpperCamelCase__ , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
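# A usage note, not in the original: slowsort sorts the sequence in place and
# returns None, so it is called for its side effect (public name per the
# recursive calls above).
#
#     seq = [3, 1, 2]
#     slowsort(seq)   # seq is now [1, 2, 3]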
| 690 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
UpperCamelCase : List[str] = Lock()
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
global process_lock
    # we perform n passes (hard-coded to 10 here, matching the 10-element demo
    # list built in main()), since after n passes the list is guaranteed sorted;
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(UpperCamelCase__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
A = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
A = min(UpperCamelCase__ , UpperCamelCase__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(UpperCamelCase__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
A = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
A = max(UpperCamelCase__ , UpperCamelCase__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(UpperCamelCase__ )
def __snake_case ( UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
A = []
A = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
A = Pipe()
A = Pipe()
process_array_.append(
Process(
target=UpperCamelCase__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
A = temp_rs
A = temp_rr
for i in range(1 , len(UpperCamelCase__ ) - 1 ):
A = Pipe()
A = Pipe()
process_array_.append(
Process(
target=UpperCamelCase__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
A = temp_rs
A = temp_rr
process_array_.append(
Process(
target=UpperCamelCase__ , args=(
len(UpperCamelCase__ ) - 1,
arr[len(UpperCamelCase__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(UpperCamelCase__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(UpperCamelCase__ ) ):
A = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __snake_case ( ) -> Optional[Any]:
"""simple docstring"""
A = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*UpperCamelCase__ )
A = odd_even_transposition(UpperCamelCase__ )
print('Sorted List\n' )
print(*UpperCamelCase__ )
if __name__ == "__main__":
main()
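# A sequential reference version of the same odd-even transposition idea, not in
# the original; handy for sanity-checking the parallel result above.
def _odd_even_transposition_sequential(arr):  # hypothetical helper, for illustration only
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr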
| 690 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""image_processor""", """tokenizer"""]
lowerCAmelCase = """BlipImageProcessor"""
lowerCAmelCase = """AutoTokenizer"""
def __init__( self : Any , _lowercase : Any , _lowercase : str ):
A = False
super().__init__(_lowercase , _lowercase )
A = self.image_processor
def __call__( self : Optional[Any] , _lowercase : ImageInput = None , _lowercase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _lowercase : bool = True , _lowercase : Union[bool, str, PaddingStrategy] = False , _lowercase : Union[bool, str, TruncationStrategy] = None , _lowercase : Optional[int] = None , _lowercase : int = 0 , _lowercase : Optional[int] = None , _lowercase : Optional[bool] = None , _lowercase : bool = False , _lowercase : bool = False , _lowercase : bool = False , _lowercase : bool = False , _lowercase : bool = False , _lowercase : bool = True , _lowercase : Optional[Union[str, TensorType]] = None , **_lowercase : Optional[int] , ):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
A = self.tokenizer
A = self.tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_token_type_ids=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
return text_encoding
# add pixel_values
A = self.image_processor(_lowercase , return_tensors=_lowercase )
if text is not None:
A = self.tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_token_type_ids=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
else:
A = None
if text_encoding is not None:
encoding_image_processor.update(_lowercase )
return encoding_image_processor
def __a ( self : Tuple , *_lowercase : int , **_lowercase : Tuple ):
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def __a ( self : int , *_lowercase : str , **_lowercase : Optional[Any] ):
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __a ( self : List[str] ):
A = self.tokenizer.model_input_names
A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 690 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
UpperCamelCase : int = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
UpperCamelCase : List[Any] = dataset.iloc[:, 1:2].values
UpperCamelCase : Any = dataset.iloc[:, 2].values
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = train_test_split(X, y, test_size=0.2, random_state=0)
UpperCamelCase : List[str] = PolynomialFeatures(degree=4)
UpperCamelCase : Optional[int] = poly_reg.fit_transform(X)
UpperCamelCase : List[Any] = LinearRegression()
pol_reg.fit(X_poly, y)
def __snake_case ( ) -> Optional[int]:
"""simple docstring"""
plt.scatter(UpperCamelCase__ , UpperCamelCase__ , color='red' )
plt.plot(UpperCamelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCamelCase__ ) ) , color='blue' )
    plt.title('Truth or Bluff (Polynomial Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
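# An illustration, not in the original, of what PolynomialFeatures does to a
# single feature x: a degree-2 expansion emits the columns [1, x, x**2] (the bias
# column is included by default).
def _example_polynomial_expansion():  # hypothetical helper, for illustration only
    import numpy as np
    from sklearn.preprocessing import PolynomialFeatures
    expanded = PolynomialFeatures(degree=2).fit_transform(np.array([[3.0]]))
    assert (expanded == np.array([[1.0, 3.0, 9.0]])).all()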
| 690 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : Dict = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
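# A clarifying note, not in the original: with _LazyModule in place, an import
# such as `from transformers.models.jukebox import JukeboxConfig` resolves the
# submodule only on first attribute access, keeping the top-level import cheap.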
| 690 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : List[str] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = True , **_lowercase : Tuple , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A = image_std if image_std is not None else OPENAI_CLIP_STD
A = do_convert_rgb
def __a ( self : str , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Union[str, Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Optional[int] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , **_lowercase : int , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , param_name='size' , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' , default_to_square=_lowercase )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A = [convert_to_rgb(_lowercase ) for image in images]
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
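# A sketch, not in the original, of the preprocessing order implemented above,
# written out for a single image using this file's own transform imports and the
# default values from __init__ (shortest edge 224, center crop 224x224). The
# helper name and the exact call pattern are assumptions for illustration.
def _example_manual_preprocess(image):  # hypothetical helper, for illustration only
    arr = to_numpy_array(convert_to_rgb(image))
    arr = resize(arr, size=get_resize_output_image_size(arr, size=224, default_to_square=False))
    arr = center_crop(arr, size=(224, 224))
    arr = rescale(arr, scale=1 / 255)
    arr = normalize(arr, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD)
    return to_channel_dimension_format(arr, ChannelDimension.FIRST)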
| 690 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def __snake_case ( UpperCamelCase__ , UpperCamelCase__=False ) -> Any:
"""simple docstring"""
A = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
A = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> str:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A = ''
else:
A = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
A = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A = in_proj_weight[
: config.hidden_size, :
]
A = in_proj_bias[: config.hidden_size]
A = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A = in_proj_weight[
-config.hidden_size :, :
]
A = in_proj_bias[-config.hidden_size :]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
A = dct.pop(UpperCamelCase__ )
A = val
def __snake_case ( ) -> Dict:
"""simple docstring"""
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
A = DeiTConfig()
# all deit models have fine-tuned heads
A = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
A = 1000
A = 'huggingface/label-files'
A = 'imagenet-1k-id2label.json'
A = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
A = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
A = int(deit_name[-6:-4] )
A = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
A = 192
A = 768
A = 12
A = 3
elif deit_name[9:].startswith('small' ):
A = 384
A = 1536
A = 12
A = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
A = 1024
A = 4096
A = 24
A = 16
# load original model from timm
A = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A = timm_model.state_dict()
A = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# load HuggingFace model
A = DeiTForImageClassificationWithTeacher(UpperCamelCase__ ).eval()
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image, prepared by DeiTImageProcessor
A = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
A = DeiTImageProcessor(size=UpperCamelCase__ , crop_size=config.image_size )
A = image_processor(images=prepare_img() , return_tensors='pt' )
A = encoding['pixel_values']
A = model(UpperCamelCase__ )
A = timm_model(UpperCamelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase__ , outputs.logits , atol=1E-3 )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f'Saving model {deit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 690 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
A = torch.nn.Linear(10 , 10 )
A = torch.optim.SGD(model.parameters() , 0.1 )
A = Accelerator()
A = accelerator.prepare(_lowercase )
try:
pickle.loads(pickle.dumps(_lowercase ) )
except Exception as e:
self.fail(f'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 690 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase : int = {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """xglm"""
lowerCAmelCase = ["""past_key_values"""]
lowerCAmelCase = {
"""num_attention_heads""": """attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Union[str, Any] , _lowercase : Dict=256_008 , _lowercase : str=2_048 , _lowercase : int=1_024 , _lowercase : Optional[int]=4_096 , _lowercase : Optional[Any]=24 , _lowercase : int=16 , _lowercase : int="gelu" , _lowercase : Optional[Any]=0.1 , _lowercase : Dict=0.1 , _lowercase : str=0.0 , _lowercase : Any=0.0 , _lowercase : Any=0.0_2 , _lowercase : List[str]=True , _lowercase : Union[str, Any]=True , _lowercase : List[Any]=2 , _lowercase : Any=1 , _lowercase : int=0 , _lowercase : Optional[int]=2 , **_lowercase : int , ):
A = vocab_size
A = max_position_embeddings
A = d_model
A = ffn_dim
A = num_layers
A = attention_heads
A = activation_function
A = dropout
A = attention_dropout
A = activation_dropout
A = layerdrop
A = init_std
A = scale_embedding # scale factor will be sqrt(d_model) if True
A = use_cache
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , decoder_start_token_id=_lowercase , **_lowercase , )
| 690 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """convbert"""
def __init__( self : Optional[int] , _lowercase : List[Any]=30_522 , _lowercase : List[str]=768 , _lowercase : Optional[Any]=12 , _lowercase : Any=12 , _lowercase : str=3_072 , _lowercase : List[str]="gelu" , _lowercase : Dict=0.1 , _lowercase : Dict=0.1 , _lowercase : Any=512 , _lowercase : List[str]=2 , _lowercase : Tuple=0.0_2 , _lowercase : List[Any]=1e-12 , _lowercase : List[str]=1 , _lowercase : Tuple=0 , _lowercase : Any=2 , _lowercase : Union[str, Any]=768 , _lowercase : str=2 , _lowercase : Any=9 , _lowercase : Union[str, Any]=1 , _lowercase : Dict=None , **_lowercase : Union[str, Any] , ):
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase , )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = embedding_size
A = head_ratio
A = conv_kernel_size
A = num_groups
A = classifier_dropout
class lowerCamelCase__ ( UpperCAmelCase_ ):
@property
def __a ( self : str ):
if self.task == "multiple-choice":
A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 690 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Any = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
UpperCamelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split('.' ):
A = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
A = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
A = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
A = value
elif weight_type == "weight_g":
A = value
elif weight_type == "weight_v":
A = value
elif weight_type == "bias":
A = value
else:
A = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
A = []
A = fairseq_model.state_dict()
A = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
A = None
for name, value in fairseq_dict.items():
A = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
A = True
elif name.split('.' )[0] == "proj":
A = fairseq_model.proj
A = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
A = True
if "*" in mapped_key:
A = name.split(UpperCamelCase__ )[0].split('.' )[-2]
A = mapped_key.replace('*' , UpperCamelCase__ )
if "weight_g" in name:
A = 'weight_g'
elif "weight_v" in name:
A = 'weight_v'
elif "bias" in name:
A = 'bias'
elif "weight" in name:
A = 'weight'
else:
A = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(f'Unused weights: {unused_weights}' )
return proj_weight
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
A = full_name.split('conv_layers.' )[-1]
A = name.split('.' )
A = int(items[0] )
A = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
A = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
A = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
A = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
A = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(UpperCamelCase__ )
def __snake_case ( UpperCamelCase__ ) -> Any:
"""simple docstring"""
A , A = emb.weight.shape
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = emb.weight.data
return lin_layer
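# A clarifying note, not in the original: the helper above implements weight
# tying -- the decoder's (vocab, dim) embedding matrix is reused, unchanged, as
# the weight of a bias-free Linear layer serving as the output projection.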
def __snake_case ( UpperCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
with open(UpperCamelCase__ , 'r' , encoding='utf-8' ) as f:
A = f.readlines()
A = [line.split(' ' )[0] for line in lines]
A = len(UpperCamelCase__ )
A = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(UpperCamelCase__ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
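# A traced example, not in the original: for a fairseq dict file containing the
# two lines "hello 52" and "world 41", the helper above returns
# {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'hello': 4, 'world': 5}.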
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Copy/paste/tweak a fairseq wav2vec2 seq2seq checkpoint into a HF SpeechEncoderDecoder model."""
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
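# A hypothetical invocation of this conversion script (all paths below are
# placeholders, not real checkpoints):
#
#   python convert_wav2vec2_seq2seq.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --pytorch_dump_folder_path ./converted-model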
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
"""simple docstring"""
import functools
def edit_distance(worda: str, wordb: str) -> int:
    """Return the Levenshtein edit distance between ``worda`` and ``wordb``."""
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if the first word is exhausted - insert all remaining letters of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word is exhausted - delete all remaining letters of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),  # delete from the first word
            1 + min_distance(indexa, indexb + 1),  # insert into the first word
            diff + min_distance(indexa + 1, indexb + 1),  # substitute (or keep a match)
        )

    return min_distance(0, 0)
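# Hand-verified sanity checks for the memoised recursion above:
#
#   assert edit_distance("kitten", "sitting") == 3  # substitute k->s, e->i, insert g
#   assert edit_distance("", "abc") == 3            # three insertions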
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list[str]) -> int:
    """Evaluate an integer expression given in reverse Polish (postfix) notation."""
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division that truncates towards zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
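# Example: "2 1 + 3 *" is (2 + 1) * 3 in postfix, and the division branch above
# truncates towards zero:
#
#   assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9
#   assert evaluate_postfix(["15", "7", "1", "1", "+", "-", "/"]) == 3  # 15 / (7 - (1 + 1))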
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
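# Minimal usage sketch (the defaults above mirror YituTech/conv-bert-base; the
# last four arguments are the ConvBERT-specific ones):
#
#   config = ConvBertConfig(head_ratio=2, conv_kernel_size=9, num_groups=1)
#   assert config.model_type == "convbert"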
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
SPIECE_UNDERLINE = "▁"
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
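# Schematic layout produced by the two helpers above for a sequence pair (A, B);
# note that XLNet puts <cls> at the end and pads on the left:
#
#   tokens:         a1 .. an <sep> b1 .. bm <sep> <cls>
#   token_type_ids:  0 ..  0   0    1 ..  1   1     2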
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/esm2_t6_8M_UR50D": 1_024,
"facebook/esm2_t12_35M_UR50D": 1_024,
}
def load_vocab_file(vocab_file):
    """Read a plain-text vocabulary file into a list of stripped tokens."""
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
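# Minimal usage sketch (the vocab path is a placeholder; single-letter residue
# tokens are assumed to be present in the file):
#
#   tok = EsmTokenizer(vocab_file="vocab.txt")
#   ids = tok("MKT")["input_ids"]  # ids for <cls> M K T <eos>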
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic) -> tuple[list[list[int]], list[list[int]]]:
    """A* search over a 2D grid; returns the found path and the action map."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own arguments plus those destined for the training script."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        ))
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
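# Hypothetical launch (the training script name is a placeholder); the launcher
# imports the script as a module and spawns its `_mp_fn` once per TPU core:
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased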
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
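# Minimal usage sketch (requires the sentencepiece model shipped with the
# checkpoint referenced in the maps above):
#
#   tok = RemBertTokenizer.from_pretrained("google/rembert")
#   tok.tokenize("Hello world")  # SentencePiece pieces, e.g. ['▁Hello', '▁world']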
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
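# With the structure above, importing a light symbol stays cheap while heavy
# backends load lazily on first access, e.g.:
#
#   from transformers.models.blenderbot_small import BlenderbotSmallConfig  # no torch import
#   from transformers.models.blenderbot_small import BlenderbotSmallModel   # triggers the torch import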
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
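# Minimal usage sketch (checkpoint name taken from the maps above):
#
#   tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   tok("hello world")["input_ids"]  # ids for [CLS] hello world [SEP]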
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    """For each index i > 0, compute the length of the longest common prefix of input_str and input_str[i:]."""
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check whether the match at position ``i`` can be extended by one more character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of ``pattern`` in ``input_str`` via the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if a value is at least the length of the pattern string,
        # that index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1
    return answer
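# Worked example: z_function("abacaba") == [0, 0, 1, 0, 3, 0, 1], and
# find_pattern("aba", "abacaba") == 2 because z_function("abaabacaba") reaches
# len("aba") at exactly two positions.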
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True)

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True)["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False  # flag names reconstructed; the source shows two unnamed False flags
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_rw_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(result.past_key_values)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))
    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return
            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return
            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads
            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)
            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )
        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]
        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False  # attribute name reconstructed; the source shows only an unnamed False flag
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64), in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy")
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atomaa_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37, dtype=torch.int32, device=protein["aatype"].device)
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14, dtype=torch.int32, device=protein["aatype"].device)
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask, dtype=torch.float32, device=protein["aatype"].device)
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_atoa[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein


def make_atomaa_masks_np(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
    """NumPy wrapper around make_atomaa_masks."""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atomaa_masks(batch))
    return out
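# Worked example: glycine has only the backbone heavy atoms N, CA, C and O, so
# its row of the 14-slot mask built above is [1.0, 1.0, 1.0, 1.0] followed by
# ten zeros.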
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class FlaxPipelineDownloadTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname)
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None)
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
def __a ( self : Dict ):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=None)
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1
def __a ( self : List[str] ):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None)
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
def __a ( self : str ):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16)
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
def __a ( self : Any ):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=False, steps_offset=1)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None)
        scheduler_state = scheduler.create_state()
        params['scheduler'] = scheduler_state
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1
def __a ( self : List[str] ):
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None)
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None,
            use_memory_efficient_attention=True)
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
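# Added example (not from the original tests): a minimal sketch of the
# replicate/shard/pmap pattern these tests rely on, run on toy data. The function
# and parameter names are illustrative assumptions.
def _pmap_sketch() -> None:
    def scale(params, batch):
        return batch * params['scale']

    p_scale = jax.pmap(scale)
    n = jax.device_count()
    params = replicate({'scale': jnp.float32(2.0)})  # copy params to every device
    batch = np.ones((n * 4, 1), dtype=np.float32)
    batch = shard(batch)  # -> (n, 4, 1): one leading slice per device
    out = p_scale(params, batch)  # runs in parallel across devices
    assert out.shape == (n, 4, 1)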
| 690 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : List[str] = logging.get_logger(__name__)
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : Tuple , _lowercase : bool = True , _lowercase : Optional[Dict[str, int]] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[str] , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Any , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[Any] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : float , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Any , _lowercase : ImageInput , _lowercase : Optional[bool] = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[float] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_lowercase : Any , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def __a ( self : int , _lowercase : List[str] , _lowercase : List[Tuple] = None ):
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_lowercase ):
A = target_sizes.numpy()
A = []
for idx in range(len(_lowercase ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_lowercase )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
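# Added example (not part of the processor above): a numeric sketch of the
# rescale-then-normalize arithmetic the `preprocess` pipeline applies; the random
# image and its shape are illustrative.
def _rescale_normalize_sketch() -> None:
    image = np.random.randint(0, 256, (3, 18, 18)).astype(np.float32)
    rescaled = image * (1 / 255)  # the `do_rescale` step
    mean = np.array(IMAGENET_STANDARD_MEAN).reshape(3, 1, 1)
    std = np.array(IMAGENET_STANDARD_STD).reshape(3, 1, 1)
    normalized = (rescaled - mean) / std  # the `do_normalize` step
    assert normalized.shape == image.shape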
| 690 |
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """simple docstring"""
    return AutoConfig.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """simple docstring"""
    return AutoTokenizer.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """simple docstring"""
    return AutoModel.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """simple docstring"""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """simple docstring"""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """simple docstring"""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """simple docstring"""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
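# Added example (not part of the original file): a hedged usage sketch of the wrappers
# above. 'bert-base-cased' stands in for any checkpoint and needs a network or local
# cache to resolve.
if __name__ == "__main__":
    cfg = config('bert-base-cased')
    tok = tokenizer('bert-base-cased')
    mdl = model('bert-base-cased', config=cfg)
    print(type(cfg).__name__, type(tok).__name__, type(mdl).__name__)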
| 690 | 1 |
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """simple docstring"""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative')
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees')
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 690 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : List[str] = logging.get_logger(__name__)
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : Tuple , _lowercase : bool = True , _lowercase : Optional[Dict[str, int]] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[str] , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Any , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[Any] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : float , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Any , _lowercase : ImageInput , _lowercase : Optional[bool] = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[float] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_lowercase : Any , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def __a ( self : int , _lowercase : List[str] , _lowercase : List[Tuple] = None ):
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_lowercase ):
A = target_sizes.numpy()
A = []
for idx in range(len(_lowercase ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_lowercase )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 690 | 1 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """simple docstring"""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder('float64', [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder('int32')
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder('float', [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder('float', [dim])
        vb = tf.placeholder('float', [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder('float', [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
return centroids, assignments
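# Added example (not part of the original): the same expectation/maximization loop
# restated compactly in NumPy, since the TF1 session API above is deprecated.
# Names mirror the function above but the implementation is an illustrative sketch.
def numpy_kmeans(vectors, noofclusters, noofiterations=100, seed=0):
    import numpy as np

    vectors = np.asarray(vectors, dtype=float)
    rng = np.random.default_rng(seed)
    centroids = vectors[rng.choice(len(vectors), size=noofclusters, replace=False)]
    assignments = np.zeros(len(vectors), dtype=int)
    for _ in range(noofiterations):
        # E-step: assign each vector to the nearest centroid
        dists = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = dists.argmin(axis=1)
        # M-step: move each centroid to the mean of its members
        for c in range(noofclusters):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments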
| 690 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def __snake_case ( UpperCamelCase__ = "laptop" ) -> DataFrame:
"""simple docstring"""
A = f'https://www.amazon.in/laptop/s?k={product}'
A = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
A = BeautifulSoup(requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).text )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
A = item.ha.text
A = 'https://www.amazon.in/' + item.ha.a['href']
A = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
A = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
A = 'Not available'
try:
A = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
A = ''
try:
A = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
A = float('nan' )
except AttributeError:
pass
A = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = ' '
A = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCamelCase : Any = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
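# Added worked example (illustrative numbers) of the discount formula used above:
# discount % = (MRP - price) / MRP * 100.
def _discount_sketch() -> None:
    mrp, price = 59999.0, 44999.25
    discount = (mrp - price) / mrp * 100
    assert round(discount, 2) == 25.0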
| 690 | 1 |
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bza_file,
    gz_file,
    lza_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    text_file,
    tmp_path,
):
    """simple docstring"""
    input_paths_and_base_extractors = {
        '7z': (seven_zip_file, SevenZipExtractor),
        'bz2': (bza_file, BzipaExtractor),
        'gzip': (gz_file, GzipExtractor),
        'lz4': (lza_file, LzaExtractor),
        'tar': (tar_file, TarExtractor),
        'xz': (xz_file, XzExtractor),
        'zip': (zip_file, ZipExtractor),
        'zstd': (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f'for \'{compression_format}\' compression_format, '
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ('extracted' if is_archive else 'extracted.txt')
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='utf-8')
    else:
        extracted_file_content = output_path.read_text(encoding='utf-8')
    expected_file_content = text_file.read_text(encoding='utf-8')
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bza_file,
    gz_file,
    lza_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    text_file,
    tmp_path,
):
    """simple docstring"""
    input_paths = {
        '7z': seven_zip_file,
        'bz2': bza_file,
        'gzip': gz_file,
        'lz4': lza_file,
        'tar': tar_file,
        'xz': xz_file,
        'zip': zip_file,
        'zstd': zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f'for \'{compression_format}\' compression_format, '
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ('extracted' if is_archive else 'extracted.txt')
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='utf-8')
    else:
        extracted_file_content = output_path.read_text(encoding='utf-8')
    expected_file_content = text_file.read_text(encoding='utf-8')
assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    """simple docstring"""
    import tarfile
    directory = tmp_path / 'data_dot_dot'
    directory.mkdir()
    path = directory / 'tar_file_with_dot_dot.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(text_file, arcname=os.path.join('..', text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    """simple docstring"""
    import tarfile
    directory = tmp_path / 'data_sym_link'
    directory.mkdir()
    path = directory / 'tar_file_with_sym_link.tar'
    os.symlink('..', directory / 'subdir', target_is_directory=True)
    with tarfile.TarFile(path, 'w') as f:
        f.add(str(directory / 'subdir'), arcname='subdir')  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    """simple docstring"""
    insecure_tar_files = {
        'tar_file_with_dot_dot': tar_file_with_dot_dot,
        'tar_file_with_sym_link': tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / 'extracted'
    TarExtractor.extract(insecure_tar_file, output_path)
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    """simple docstring"""
    not_a_zip_file = tmpdir / 'not_a_zip_file'
    # From: https://github.com/python/cpython/pull/5053
    contents = (
        b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'
        b'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'
        b'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'
        b'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'
    )
    with not_a_zip_file.open('wb') as f:
        f.write(contents)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
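# Added example (not from the original tests): a minimal sketch of the kind of check
# an extractor can perform against the insecure archives built above — rejecting
# members whose resolved path escapes the destination directory. The helper name is
# illustrative.
def _is_safe_member(base_dir: str, member_name: str) -> bool:
    base = os.path.realpath(base_dir)
    target = os.path.realpath(os.path.join(base_dir, member_name))
    return target == base or target.startswith(base + os.sep)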
| 690 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self : List[str] , _lowercase : Optional[Any] , _lowercase : int=7 , _lowercase : List[str]=3 , _lowercase : Tuple=18 , _lowercase : Dict=30 , _lowercase : Any=400 , _lowercase : int=True , _lowercase : List[Any]=None , _lowercase : Tuple=True , _lowercase : List[Any]=False , _lowercase : str=True , _lowercase : List[str]=True , _lowercase : int=[0.5, 0.5, 0.5] , _lowercase : Optional[int]=[0.5, 0.5, 0.5] , ):
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size if size is not None else {'height': 18, 'width': 20}
A = do_thumbnail
A = do_align_axis
A = do_pad
A = do_normalize
A = image_mean
A = image_std
def __a ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
A = DonutImageProcessingTester(self )
@property
def __a ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Union[str, Any] ):
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , 'do_resize' ) )
self.assertTrue(hasattr(_lowercase , 'size' ) )
self.assertTrue(hasattr(_lowercase , 'do_thumbnail' ) )
self.assertTrue(hasattr(_lowercase , 'do_align_long_axis' ) )
self.assertTrue(hasattr(_lowercase , 'do_pad' ) )
self.assertTrue(hasattr(_lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowercase , 'image_mean' ) )
self.assertTrue(hasattr(_lowercase , 'image_std' ) )
def __a ( self : int ):
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
A = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def __a ( self : Any ):
pass
@is_flaky()
def __a ( self : int ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[str] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self : List[Any] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A = image_processing(_lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
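# Added example (illustrative, not the library implementation): a helper matching the
# backward-compatibility behavior checked above, where a (width, height) tuple from an
# older config becomes a {'height', 'width'} dict.
def _size_tuple_to_dict(size):
    if isinstance(size, (tuple, list)):
        return {'height': size[1], 'width': size[0]}
    return {'height': size, 'width': size}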
| 690 | 1 |
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : List[Any] = "T5Config"
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """mt5"""
lowerCAmelCase = MTaConfig
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """mt5"""
lowerCAmelCase = MTaConfig
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """mt5"""
lowerCAmelCase = MTaConfig
| 690 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase__ :
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator, batch_size: int = 16):
    """simple docstring"""
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv', data_files=data_files)
    label_list = datasets['train'].unique('label')
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'], examples['sentence2'], truncation=True, max_length=None, padding='max_length')
        if "label" in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['sentence1', 'sentence2', 'label'],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
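# Added toy illustration (not part of the original helpers) of why the collate_fn
# above pads to a fixed length on TPU: static shapes avoid recompilation, while
# 'longest' padding produces a different shape per batch.
def _padding_sketch() -> None:
    def pad_to(seqs, length, pad_id=0):
        return [s + [pad_id] * (length - len(s)) for s in seqs]

    seqs = [[1, 2, 3], [4, 5]]
    dynamic = pad_to(seqs, max(len(s) for s in seqs))  # 'longest': shape varies per batch
    static = pad_to(seqs, 8)  # 'max_length': one fixed shape for every batch
    assert len(dynamic[0]) == 3 and len(static[0]) == 8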
| 690 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Optional[int] = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """deta"""
lowerCAmelCase = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Dict , _lowercase : Union[str, Any]=None , _lowercase : str=900 , _lowercase : Tuple=2_048 , _lowercase : int=6 , _lowercase : List[str]=2_048 , _lowercase : int=8 , _lowercase : Optional[Any]=6 , _lowercase : Dict=1_024 , _lowercase : Any=8 , _lowercase : Optional[int]=0.0 , _lowercase : str=True , _lowercase : str="relu" , _lowercase : List[Any]=256 , _lowercase : str=0.1 , _lowercase : List[Any]=0.0 , _lowercase : Dict=0.0 , _lowercase : Optional[int]=0.0_2 , _lowercase : int=1.0 , _lowercase : Optional[Any]=True , _lowercase : Any=False , _lowercase : Tuple="sine" , _lowercase : Union[str, Any]=5 , _lowercase : Optional[int]=4 , _lowercase : str=4 , _lowercase : List[str]=True , _lowercase : Any=300 , _lowercase : Any=True , _lowercase : Any=True , _lowercase : Tuple=1 , _lowercase : Tuple=5 , _lowercase : Dict=2 , _lowercase : List[str]=1 , _lowercase : Tuple=1 , _lowercase : Optional[int]=5 , _lowercase : str=2 , _lowercase : List[str]=0.1 , _lowercase : Optional[Any]=0.2_5 , **_lowercase : Optional[Any] , ):
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
A = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] )
else:
if isinstance(_lowercase , _lowercase ):
A = backbone_config.pop('model_type' )
A = CONFIG_MAPPING[backbone_model_type]
A = config_class.from_dict(_lowercase )
A = backbone_config
A = num_queries
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = init_xavier_std
A = encoder_layerdrop
A = auxiliary_loss
A = position_embedding_type
# deformable attributes
A = num_feature_levels
A = encoder_n_points
A = decoder_n_points
A = two_stage
A = two_stage_num_proposals
A = with_box_refine
A = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
A = class_cost
A = bbox_cost
A = giou_cost
# Loss coefficients
A = mask_loss_coefficient
A = dice_loss_coefficient
A = bbox_loss_coefficient
A = giou_loss_coefficient
A = eos_coefficient
A = focal_alpha
super().__init__(is_encoder_decoder=_lowercase , **_lowercase )
@property
def __a ( self : Optional[int] ):
return self.encoder_attention_heads
@property
def __a ( self : List[str] ):
return self.d_model
def __a ( self : Optional[Any] ):
A = copy.deepcopy(self.__dict__ )
A = self.backbone_config.to_dict()
A = self.__class__.model_type
return output
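# Added example (hedged, illustrative values): a serialization round-trip against the
# upstream `transformers.DetaConfig` that this record mirrors, showing how the nested
# backbone config is emitted alongside the top-level fields.
def _deta_config_roundtrip_sketch() -> None:
    from transformers import DetaConfig  # assumes a transformers release with DETA support

    config = DetaConfig(num_queries=10)
    as_dict = config.to_dict()
    assert as_dict['model_type'] == 'deta'
    assert 'model_type' in as_dict['backbone_config']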
| 690 |
"""simple docstring"""
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """simple docstring"""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
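    # Added worked examples (illustrative): a strictly decreasing prefix keeps only
    # the best tail, and equal elements count as non-decreasing.
    assert longest_subsequence([9, 8, 7, 6, 5, 7]) == [8]
    assert longest_subsequence([1, 1, 1]) == [1, 1, 1]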
| 690 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
A = tempfile.mkdtemp()
A = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
A = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
A = os.path.join(self.tmpdirname , _lowercase )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_lowercase , _lowercase )
def __a ( self : List[str] , **_lowercase : Union[str, Any] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def __a ( self : Dict , **_lowercase : Optional[int] ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase )
def __a ( self : List[str] , **_lowercase : Tuple ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowercase )
def __a ( self : List[Any] ):
shutil.rmtree(self.tmpdirname )
def __a ( self : Dict ):
A = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __a ( self : Tuple ):
A = self.get_tokenizer()
A = self.get_rust_tokenizer()
A = self.get_image_processor()
A = AlignProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor_slow.save_pretrained(self.tmpdirname )
A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowercase )
A = AlignProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor_fast.save_pretrained(self.tmpdirname )
A = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowercase )
self.assertIsInstance(processor_fast.tokenizer , _lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowercase )
self.assertIsInstance(processor_fast.image_processor , _lowercase )
def __a ( self : Optional[Any] ):
A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
A = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
A = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def __a ( self : Any ):
A = self.get_image_processor()
A = self.get_tokenizer()
A = AlignProcessor(tokenizer=_lowercase , image_processor=_lowercase )
A = self.prepare_image_inputs()
A = image_processor(_lowercase , return_tensors='np' )
A = processor(images=_lowercase , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __a ( self : Optional[Any] ):
A = self.get_image_processor()
A = self.get_tokenizer()
A = AlignProcessor(tokenizer=_lowercase , image_processor=_lowercase )
A = 'lower newer'
A = processor(text=_lowercase )
A = tokenizer(_lowercase , padding='max_length' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __a ( self : Tuple ):
A = self.get_image_processor()
A = self.get_tokenizer()
A = AlignProcessor(tokenizer=_lowercase , image_processor=_lowercase )
A = 'lower newer'
A = self.prepare_image_inputs()
A = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_lowercase ):
processor()
def __a ( self : Optional[int] ):
A = self.get_image_processor()
A = self.get_tokenizer()
A = AlignProcessor(tokenizer=_lowercase , image_processor=_lowercase )
A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A = processor.batch_decode(_lowercase )
A = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def __a ( self : Union[str, Any] ):
A = self.get_image_processor()
A = self.get_tokenizer()
A = AlignProcessor(tokenizer=_lowercase , image_processor=_lowercase )
A = 'lower newer'
A = self.prepare_image_inputs()
A = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
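# Added example (illustrative, not the library implementation): a minimal sketch of
# the composition the processor tests above rely on — text goes to the tokenizer,
# images to the image processor, and the two output dicts are merged.
def _composite_processor_sketch(tokenizer, image_processor, text=None, images=None):
    if text is None and images is None:
        raise ValueError('You have to specify either text or images.')
    encoding = {}
    if text is not None:
        encoding.update(tokenizer(text))
    if images is not None:
        encoding.update(image_processor(images))
    return encoding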
| 690 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """simple docstring"""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """simple docstring"""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
"""simple docstring"""
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
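    # Added worked example (illustrative): the 3-4-5 right triangle.
    assert float(euclidean_distance([0, 0], [3, 4])) == 5.0
    assert euclidean_distance_no_np([0, 0], [3, 4]) == 5.0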
| 690 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
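

if __name__ == "__main__":
    # Added example (illustrative, not part of the original file): build the
    # default configuration and read back a field, as with any
    # PretrainedConfig subclass.
    config = GLPNConfig(max_depth=80)
    print(config.hidden_sizes)  # [32, 64, 160, 256]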
| 690 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False,) -> None:
    """Export a torch model to ONNX, handling the pre-1.11 export signature."""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> str:
"""simple docstring"""
A = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
A = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
A = 'cpu'
A = StableDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=UpperCamelCase__ ).to(UpperCamelCase__ )
A = Path(UpperCamelCase__ )
# TEXT ENCODER
A = pipeline.text_encoder.config.max_position_embeddings
A = pipeline.text_encoder.config.hidden_size
A = pipeline.tokenizer(
'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors='pt' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=UpperCamelCase__ , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'sequence'},
} , opset=UpperCamelCase__ , )
del pipeline.text_encoder
# UNET
A = pipeline.unet.config.in_channels
A = pipeline.unet.config.sample_size
A = output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=UpperCamelCase__ , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'timestep': {0: 'batch'},
'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
} , opset=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , )
A = str(unet_path.absolute().as_posix() )
A = os.path.dirname(UpperCamelCase__ )
A = onnx.load(UpperCamelCase__ )
# clean up existing tensor files
shutil.rmtree(UpperCamelCase__ )
os.mkdir(UpperCamelCase__ )
# collate external tensor files into one
onnx.save_model(
UpperCamelCase__ , UpperCamelCase__ , save_as_external_data=UpperCamelCase__ , all_tensors_to_one_file=UpperCamelCase__ , location='weights.pb' , convert_attribute=UpperCamelCase__ , )
del pipeline.unet
# VAE ENCODER
A = pipeline.vae
A = vae_encoder.config.in_channels
A = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
A = lambda UpperCamelCase__ , UpperCamelCase__ : vae_encoder.encode(UpperCamelCase__ , UpperCamelCase__ )[0].sample()
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=UpperCamelCase__ , )
# VAE DECODER
A = pipeline.vae
A = vae_decoder.config.latent_channels
A = vae_decoder.config.out_channels
# forward only through the decoder part
A = vae_encoder.decode
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=UpperCamelCase__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
A = pipeline.safety_checker
A = safety_checker.config.vision_config.num_channels
A = safety_checker.config.vision_config.image_size
A = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
} , opset=UpperCamelCase__ , )
del pipeline.safety_checker
A = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
A = pipeline.feature_extractor
else:
A = None
A = None
A = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(UpperCamelCase__ )
print('ONNX pipeline saved to' , UpperCamelCase__ )
del pipeline
del onnx_pipeline
A = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase__ , provider='CPUExecutionProvider' )
print('ONNX pipeline is loadable' )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
UpperCamelCase : str = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
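    # Example invocation (illustrative; the script filename and model id are
    # placeholders, not taken from this file):
    #   python convert_sd_to_onnx.py --model_path runwayml/stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14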
| 690 | 1 |
"""simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the weight vector selected for the sample."""
        da = 0.0
        db = 0.0
        for i in range(len(sample)):
            da += math.pow((sample[i] - weights[0][i]), 2)
            db += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if da > db else 1

    def update(self, weights: list[list[float]], sample: list[int], j: int, alpha: float) -> list[list[float]]:
        """Move the winning weight vector towards the sample by a factor alpha."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    """Train a tiny two-cluster self-organizing map and classify a test sample."""
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f'Clusters that the test sample belongs to : {winner}')
    print(f'Weights that have been trained : {weights}')
# running the main() function
if __name__ == "__main__":
main()
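# Added example (illustrative): a single update step with alpha = 0.5 moves the
# winning vector halfway towards the sample, e.g.
#   SelfOrganizingMap().update([[0.0, 0.0], [1.0, 1.0]], [1, 0], 0, 0.5)
#   returns [[0.5, 0.0], [1.0, 1.0]]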
| 690 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker holding one list element; trades it with its neighbors each phase."""
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort arr ascending with one process per element, connected by pipes."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    """Demo: sort a reversed list of ten integers in parallel."""
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
if __name__ == "__main__":
main()
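# Added note (illustrative): with the demo input the run is expected to print
#   Initial List
#   10 9 8 7 6 5 4 3 2 1
#   Sorted List
#
#   1 2 3 4 5 6 7 8 9 10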
| 690 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = StableDiffusionPanoramaPipeline
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
def __a ( self : int ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
A = DDIMScheduler()
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
A = CLIPTextModel(_lowercase )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __a ( self : Optional[int] , _lowercase : Dict , _lowercase : int=0 ):
A = torch.manual_seed(_lowercase )
A = {
'prompt': 'a photo of the dolomites',
'generator': generator,
# Setting height and width to None to prevent OOMs on CPU.
'height': None,
'width': None,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : int ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = StableDiffusionPanoramaPipeline(**_lowercase )
A = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_dummy_inputs(_lowercase )
A = sd_pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __a ( self : Tuple ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def __a ( self : List[Any] ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def __a ( self : str ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = StableDiffusionPanoramaPipeline(**_lowercase )
A = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_dummy_inputs(_lowercase )
A = 'french fries'
A = sd_pipe(**_lowercase , negative_prompt=_lowercase )
A = output.images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __a ( self : int ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = StableDiffusionPanoramaPipeline(**_lowercase )
A = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_dummy_inputs(_lowercase )
A = sd_pipe(**_lowercase , view_batch_size=2 )
A = output.images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __a ( self : int ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' )
A = StableDiffusionPanoramaPipeline(**_lowercase )
A = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_dummy_inputs(_lowercase )
A = sd_pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __a ( self : Optional[int] ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = PNDMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , skip_prk_steps=_lowercase )
A = StableDiffusionPanoramaPipeline(**_lowercase )
A = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_dummy_inputs(_lowercase )
A = sd_pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : List[str] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : List[Any] , _lowercase : Any=0 ):
A = torch.manual_seed(_lowercase )
A = {
'prompt': 'a photo of the dolomites',
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def __a ( self : List[str] ):
A = 'stabilityai/stable-diffusion-2-base'
A = DDIMScheduler.from_pretrained(_lowercase , subfolder='scheduler' )
A = StableDiffusionPanoramaPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
A = self.get_inputs()
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
A = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def __a ( self : List[Any] ):
A = StableDiffusionPanoramaPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-base' , safety_checker=_lowercase )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
A = self.get_inputs()
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
A = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __a ( self : str ):
A = 0
def callback_fn(_lowercase : int , _lowercase : int , _lowercase : torch.FloatTensor ) -> None:
A = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
A = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
A = latents[0, -3:, -3:, -1]
A = np.array(
[
0.1_8_6_8_1_8_6_9,
0.3_3_9_0_7_8_1_6,
0.5_3_6_1_2_7_6,
0.1_4_4_3_2_8_6_5,
-0.0_2_8_5_6_6_1_1,
-0.7_3_9_4_1_1_2_3,
0.2_3_3_9_7_9_8_7,
0.4_7_3_2_2_6_8_2,
-0.3_7_8_2_3_1_6_4,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
A = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
A = latents[0, -3:, -3:, -1]
A = np.array(
[
0.1_8_5_3_9_6_4_5,
0.3_3_9_8_7_2_4_8,
0.5_3_7_8_5_5_9,
0.1_4_4_3_7_1_4_2,
-0.0_2_4_5_5_2_6_1,
-0.7_3_3_8_3_1_7,
0.2_3_9_9_0_7_5_5,
0.4_7_3_5_6_2_7_2,
-0.3_7_8_6_5_0_5,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
A = False
A = 'stabilityai/stable-diffusion-2-base'
A = DDIMScheduler.from_pretrained(_lowercase , subfolder='scheduler' )
A = StableDiffusionPanoramaPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
A = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
A = self.get_inputs()
pipe(**_lowercase , callback=_lowercase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __a ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A = 'stabilityai/stable-diffusion-2-base'
A = DDIMScheduler.from_pretrained(_lowercase , subfolder='scheduler' )
A = StableDiffusionPanoramaPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
A = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A = self.get_inputs()
A = pipe(**_lowercase )
A = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 690 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
UpperCamelCase : int = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
UpperCamelCase : List[Any] = dataset.iloc[:, 1:2].values
UpperCamelCase : Any = dataset.iloc[:, 2].values
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = train_test_split(X, y, test_size=0.2, random_state=0)
UpperCamelCase : List[str] = PolynomialFeatures(degree=4)
UpperCamelCase : Optional[int] = poly_reg.fit_transform(X)
UpperCamelCase : List[Any] = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial() -> None:
    """Visualize the polynomial regression fit against the raw data."""
    plt.scatter(X, y, color='red')
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color='blue')
    plt.title('Truth or Bluff (Polynomial Regression)')
    plt.xlabel('Position level')
    plt.ylabel('Salary')
    plt.show()


if __name__ == "__main__":
    viz_polynomial()
    # Predicting a new result with Polynomial Regression
    print(pol_reg.predict(poly_reg.fit_transform([[5.5]])))
    # output should be 132148.43750003
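    # Added comparison (illustrative, not in the original script): a plain
    # linear fit on the same data, to contrast with the degree-4 polynomial.
    lin_reg = LinearRegression().fit(X, y)
    print(lin_reg.predict([[5.5]]))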
| 690 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
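# Added note (illustrative): with _LazyModule, `from transformers import XGLMModel`
# only imports the heavy torch-backed submodule on first attribute access, so a
# plain `import transformers` stays cheap.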
| 690 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : List[str] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = True , **_lowercase : Tuple , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , default_to_square=_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A = image_std if image_std is not None else OPENAI_CLIP_STD
A = do_convert_rgb
def __a ( self : str , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Union[str, Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Optional[int] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , **_lowercase : int , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , param_name='size' , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' , default_to_square=_lowercase )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A = [convert_to_rgb(_lowercase ) for image in images]
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
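# Added example (illustrative; the instance name stands in for the processor
# class defined above):
#   processor = ImageProcessorClass()
#   batch = processor(images=pil_image, return_tensors='pt')
#   batch['pixel_values'].shape  # (1, 3, 224, 224) with the default crop_size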
| 690 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 690 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase__ ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f'Accelerated optimizer pickling failed with {e}')
        AcceleratorState._reset_state()
| 690 | 1 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
UpperCamelCase : List[Any] = False
class lowerCamelCase__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Any ):
A = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = 'A painting of a squirrel eating a burger '
A = torch.manual_seed(0 )
A = pipe(
prompt=_lowercase , generator=_lowercase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowercase )
A = VersatileDiffusionTextToImagePipeline.from_pretrained(_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = generator.manual_seed(0 )
A = pipe(
prompt=_lowercase , generator=_lowercase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __a ( self : str ):
A = VersatileDiffusionTextToImagePipeline.from_pretrained(
'shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = 'A painting of a squirrel eating a burger '
A = torch.manual_seed(0 )
A = pipe(
prompt=_lowercase , generator=_lowercase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
A = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 690 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
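# Added example (illustrative): for the default export task the property above
# yields
#   OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#                ('attention_mask', {0: 'batch', 1: 'sequence'}),
#                ('token_type_ids', {0: 'batch', 1: 'sequence'})])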
| 690 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : int = logging.get_logger(__name__)
def __snake_case ( UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
A = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
A = DetaConfig(
backbone_config=UpperCamelCase__ , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=UpperCamelCase__ , with_box_refine=UpperCamelCase__ , two_stage=UpperCamelCase__ , )
# set labels
A = 'huggingface/label-files'
if "o365" in model_name:
A = 366
A = 'object365-id2label.json'
else:
A = 91
A = 'coco-detection-id2label.json'
A = num_labels
A = json.load(open(cached_download(hf_hub_url(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) ) , 'r' ) )
A = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
return config
def __snake_case ( UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
A = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
A = dct.pop(UpperCamelCase__ )
A = val
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> Any:
"""simple docstring"""
A = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
A = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
A = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
A = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A = in_proj_weight[:dim, :]
A = in_proj_bias[: dim]
A = in_proj_weight[
dim : dim * 2, :
]
A = in_proj_bias[
dim : dim * 2
]
A = in_proj_weight[
-dim :, :
]
A = in_proj_bias[-dim :]
# fmt: on
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
A = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
A = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
A = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
A = in_proj_weight[:hidden_size, :]
A = in_proj_bias[:hidden_size]
A = in_proj_weight[
hidden_size : hidden_size * 2, :
]
A = in_proj_bias[hidden_size : hidden_size * 2]
A = in_proj_weight[-hidden_size:, :]
A = in_proj_bias[-hidden_size:]
def __snake_case ( ) -> List[str]:
"""simple docstring"""
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
A = get_deta_config(UpperCamelCase__ )
# load original state dict
if model_name == "deta-swin-large":
A = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
A = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(f'Model name {model_name} not supported' )
A = torch.load(UpperCamelCase__ , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(UpperCamelCase__ , param.shape )
# rename keys
A = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_swin_q_k_v(UpperCamelCase__ , config.backbone_config )
read_in_decoder_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
A = state_dict.pop(UpperCamelCase__ )
A = val
if "input_proj" in key:
A = state_dict.pop(UpperCamelCase__ )
A = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
A = state_dict.pop(UpperCamelCase__ )
A = val
# finally, create HuggingFace model and load state dict
A = DetaForObjectDetection(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
A = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(UpperCamelCase__ )
# load image processor
A = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
A = prepare_img()
A = processor(images=UpperCamelCase__ , return_tensors='pt' )
A = encoding['pixel_values']
A = model(pixel_values.to(UpperCamelCase__ ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
A = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
A = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
A = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
A = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(UpperCamelCase__ ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(UpperCamelCase__ ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
UpperCamelCase : str = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase : Dict = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
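    # Example invocation (illustrative; the script filename is a placeholder):
    #   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large --pytorch_dump_folder_path ./deta --push_to_hub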
| 690 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
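# This module only re-exports package-internal utilities; downstream code
# typically uses a single flat import (sketch, assuming the accelerate package
# layout this file appears to come from):
#
#   from accelerate.utils import set_seed, send_to_device, get_max_memory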
| 690 | 1 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def __snake_case ( UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
A = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
f'{test_file} instead.' )
A = components[-1]
if not test_fn.endswith('py' ):
raise ValueError(f'`test_file` should be a python file. Got {test_fn} instead.' )
if not test_fn.startswith('test_modeling_' ):
raise ValueError(
f'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.' )
A = components[:-1] + [test_fn.replace('.py' , '' )]
A = '.'.join(UpperCamelCase__ )
return test_module_path
def __snake_case ( UpperCamelCase__ ) -> str:
"""simple docstring"""
A = get_module_path(UpperCamelCase__ )
A = importlib.import_module(UpperCamelCase__ )
return test_module
def __snake_case ( UpperCamelCase__ ) -> str:
"""simple docstring"""
A = []
A = get_test_module(UpperCamelCase__ )
for attr in dir(UpperCamelCase__ ):
if attr.endswith('ModelTester' ):
tester_classes.append(getattr(UpperCamelCase__ , UpperCamelCase__ ) )
# sort with class names
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x.__name__ )
def __snake_case ( UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
A = []
A = get_test_module(UpperCamelCase__ )
for attr in dir(UpperCamelCase__ ):
A = getattr(UpperCamelCase__ , UpperCamelCase__ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
A = getattr(UpperCamelCase__ , 'all_model_classes' , [] )
if len(UpperCamelCase__ ) > 0:
test_classes.append(UpperCamelCase__ )
# sort with class names
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x.__name__ )
def __snake_case ( UpperCamelCase__ ) -> Dict:
"""simple docstring"""
A = get_test_classes(UpperCamelCase__ )
A = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x.__name__ )
def __snake_case ( UpperCamelCase__ ) -> Dict:
"""simple docstring"""
A = test_class()
if hasattr(UpperCamelCase__ , 'setUp' ):
test.setUp()
A = None
if hasattr(UpperCamelCase__ , 'model_tester' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
A = test.model_tester.__class__
return model_tester
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> str:
"""simple docstring"""
A = get_test_classes(UpperCamelCase__ )
A = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(UpperCamelCase__ )
# sort with class names
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x.__name__ )
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
A = get_test_classes_for_model(UpperCamelCase__ , UpperCamelCase__ )
A = []
for test_class in test_classes:
A = get_model_tester_from_test_class(UpperCamelCase__ )
if tester_class is not None:
tester_classes.append(UpperCamelCase__ )
# sort with class names
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x.__name__ )
def __snake_case ( UpperCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
A = get_test_classes(UpperCamelCase__ )
A = {test_class: get_model_tester_from_test_class(UpperCamelCase__ ) for test_class in test_classes}
return test_tester_mapping
def __snake_case ( UpperCamelCase__ ) -> int:
"""simple docstring"""
A = get_model_classes(UpperCamelCase__ )
A = {
model_class: get_test_classes_for_model(UpperCamelCase__ , UpperCamelCase__ ) for model_class in model_classes
}
return model_test_mapping
def __snake_case ( UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
A = get_model_classes(UpperCamelCase__ )
A = {
model_class: get_tester_classes_for_model(UpperCamelCase__ , UpperCamelCase__ ) for model_class in model_classes
}
return model_to_tester_mapping
def __snake_case ( UpperCamelCase__ ) -> Dict:
"""simple docstring"""
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return o
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return o.__name__
elif isinstance(UpperCamelCase__ , (list, tuple) ):
return [to_json(UpperCamelCase__ ) for x in o]
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return {to_json(UpperCamelCase__ ): to_json(UpperCamelCase__ ) for k, v in o.items()}
else:
return o
| 690 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def __snake_case ( UpperCamelCase__ ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
A = {'+', '-', '*', '/'}
A = []
for token in postfix_notation:
if token in operations:
A , A = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(UpperCamelCase__ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
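# The names above were mangled by this corpus; a de-obfuscated, runnable
# rendition of the same stack-based evaluator for reference:
def evaluate_postfix(postfix_notation: list[str]) -> int:
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack: list[int] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == '+':
                stack.append(a + b)
            elif token == '-':
                stack.append(a - b)
            elif token == '*':
                stack.append(a * b)
            else:
                # integer division truncated toward zero, as in the original
                stack.append(a // b + 1 if a * b < 0 and a % b != 0 else a // b)
        else:
            stack.append(int(token))
    return stack.pop()

assert evaluate_postfix(['2', '1', '+', '3', '*']) == 9   # (2 + 1) * 3
assert evaluate_postfix(['4', '13', '5', '/', '+']) == 6  # 4 + 13 / 5
assert evaluate_postfix(['-7', '2', '/']) == -3           # truncates toward zero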
| 690 | 1 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 690 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCamelCase : Any = None
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : str = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCamelCase : Optional[int] = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCamelCase : str = "▁"
# Segments (not really needed)
UpperCamelCase : str = 0
UpperCamelCase : int = 1
UpperCamelCase : List[Any] = 2
UpperCamelCase : Union[str, Any] = 3
UpperCamelCase : Optional[Any] = 4
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = """left"""
lowerCAmelCase = XLNetTokenizer
def __init__( self : Tuple , _lowercase : List[Any]=None , _lowercase : Any=None , _lowercase : int=False , _lowercase : Tuple=True , _lowercase : Union[str, Any]=False , _lowercase : int="<s>" , _lowercase : Optional[int]="</s>" , _lowercase : Dict="<unk>" , _lowercase : Optional[int]="<sep>" , _lowercase : int="<pad>" , _lowercase : Dict="<cls>" , _lowercase : str="<mask>" , _lowercase : List[str]=["<eop>", "<eod>"] , **_lowercase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it
A = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
vocab_file=_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
A = 3
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = False if not self.vocab_file else True
def __a ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
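# Usage sketch for the fast tokenizer above (assumes hub access; the class is
# exposed in transformers as XLNetTokenizerFast):
from transformers import XLNetTokenizerFast

tok = XLNetTokenizerFast.from_pretrained('xlnet-base-cased')
ids = tok('Hello world')['input_ids']
# build_inputs_with_special_tokens appends <sep> and <cls> at the END of the
# sequence, matching the left-padding convention declared above
assert ids[-2] == tok.sep_token_id and ids[-1] == tok.cls_token_id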
| 690 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : List[Any] , _lowercase : int , _lowercase : int ):
A = jnp.ones((batch_size, length) ) / length
return scores
def __a ( self : Optional[int] ):
A = None
A = 20
A = self._get_uniform_logits(batch_size=2 , length=_lowercase )
# tweak scores to not be uniform anymore
A = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
A = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
A = jax.nn.softmax(_lowercase , axis=-1 )
A = FlaxTemperatureLogitsWarper(temperature=0.5 )
A = FlaxTemperatureLogitsWarper(temperature=1.3 )
A = jax.nn.softmax(temp_dist_warper_sharper(_lowercase , scores.copy() , cur_len=_lowercase ) , axis=-1 )
A = jax.nn.softmax(temp_dist_warper_smoother(_lowercase , scores.copy() , cur_len=_lowercase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def __a ( self : Tuple ):
A = None
A = 10
A = 2
# create ramp distribution
A = np.broadcast_to(np.arange(_lowercase )[None, :] , (batch_size, vocab_size) ).copy()
A = ramp_logits[1:, : vocab_size // 2] + vocab_size
A = FlaxTopKLogitsWarper(3 )
A = top_k_warp(_lowercase , _lowercase , cur_len=_lowercase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
A = 5
A = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
A = np.broadcast_to(np.arange(_lowercase )[None, :] , (batch_size, length) ).copy()
A = top_k_warp_safety_check(_lowercase , _lowercase , cur_len=_lowercase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def __a ( self : Dict ):
A = None
A = 10
A = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
A = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
A = FlaxTopPLogitsWarper(0.8 )
A = np.exp(top_p_warp(_lowercase , _lowercase , cur_len=_lowercase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
A = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
A = np.broadcast_to(np.arange(_lowercase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
A = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
A = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
A = top_p_warp(_lowercase , _lowercase , cur_len=_lowercase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def __a ( self : Union[str, Any] ):
A = 20
A = 4
A = 0
A = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowercase )
# check that min length is applied at length 5
A = ids_tensor((batch_size, 20) , vocab_size=20 )
A = 5
A = self._get_uniform_logits(_lowercase , _lowercase )
A = min_dist_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] )
# check that min length is not applied anymore at length 15
A = self._get_uniform_logits(_lowercase , _lowercase )
A = 15
A = min_dist_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertFalse(jnp.isinf(_lowercase ).any() )
def __a ( self : str ):
A = 20
A = 4
A = 0
A = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowercase )
# check that all scores are -inf except the bos_token_id score
A = ids_tensor((batch_size, 1) , vocab_size=20 )
A = 1
A = self._get_uniform_logits(_lowercase , _lowercase )
A = logits_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
A = 3
A = self._get_uniform_logits(_lowercase , _lowercase )
A = logits_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertFalse(jnp.isinf(_lowercase ).any() )
def __a ( self : List[str] ):
A = 20
A = 4
A = 0
A = 5
A = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowercase , eos_token_id=_lowercase )
# check that all scores are -inf except the eos_token_id when max_length is reached
A = ids_tensor((batch_size, 4) , vocab_size=20 )
A = 4
A = self._get_uniform_logits(_lowercase , _lowercase )
A = logits_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
A = 3
A = self._get_uniform_logits(_lowercase , _lowercase )
A = logits_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertFalse(jnp.isinf(_lowercase ).any() )
def __a ( self : Dict ):
A = 4
A = 10
A = 15
A = 2
A = 1
A = 15
# dummy input_ids and scores
A = ids_tensor((batch_size, sequence_length) , _lowercase )
A = input_ids.copy()
A = self._get_uniform_logits(_lowercase , _lowercase )
A = scores.copy()
# instantiate all dist processors
A = FlaxTemperatureLogitsWarper(temperature=0.5 )
A = FlaxTopKLogitsWarper(3 )
A = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
A = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowercase )
A = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowercase )
A = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowercase , eos_token_id=_lowercase )
A = 10
# no processor list
A = temp_dist_warp(_lowercase , _lowercase , cur_len=_lowercase )
A = top_k_warp(_lowercase , _lowercase , cur_len=_lowercase )
A = top_p_warp(_lowercase , _lowercase , cur_len=_lowercase )
A = min_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
A = bos_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
A = eos_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
# with processor list
A = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
A = processor(_lowercase , _lowercase , cur_len=_lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowercase , _lowercase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def __a ( self : str ):
A = 4
A = 10
A = 15
A = 2
A = 1
A = 15
# dummy input_ids and scores
A = ids_tensor((batch_size, sequence_length) , _lowercase )
A = input_ids.copy()
A = self._get_uniform_logits(_lowercase , _lowercase )
A = scores.copy()
# instantiate all dist processors
A = FlaxTemperatureLogitsWarper(temperature=0.5 )
A = FlaxTopKLogitsWarper(3 )
A = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
A = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowercase )
A = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowercase )
A = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowercase , eos_token_id=_lowercase )
A = 10
# no processor list
def run_no_processor_list(_lowercase : Optional[int] , _lowercase : Union[str, Any] , _lowercase : Dict ):
A = temp_dist_warp(_lowercase , _lowercase , cur_len=_lowercase )
A = top_k_warp(_lowercase , _lowercase , cur_len=_lowercase )
A = top_p_warp(_lowercase , _lowercase , cur_len=_lowercase )
A = min_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
A = bos_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
A = eos_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
return scores
# with processor list
def run_processor_list(_lowercase : Tuple , _lowercase : str , _lowercase : Optional[Any] ):
A = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
A = processor(_lowercase , _lowercase , cur_len=_lowercase )
return scores
A = jax.jit(_lowercase )
A = jax.jit(_lowercase )
A = jitted_run_no_processor_list(_lowercase , _lowercase , _lowercase )
A = jitted_run_processor_list(_lowercase , _lowercase , _lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowercase , _lowercase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
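# Standalone sketch of the warpers exercised in the tests above (assumes jax
# plus the Flax extras of transformers are installed):
import jax.numpy as jnp
from transformers.generation import FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper

input_ids = jnp.array([[0]])
scores = jnp.array([[1.0, 2.0, 3.0, 4.0, 5.0]])
scores = FlaxTemperatureLogitsWarper(temperature=0.5)(input_ids, scores, cur_len=1)
scores = FlaxTopKLogitsWarper(top_k=2)(input_ids, scores, cur_len=1)
# everything outside the two largest logits has been masked to -inf
assert int(jnp.isinf(scores).sum()) == 3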
| 690 |
"""simple docstring"""
from __future__ import annotations
UpperCamelCase : Any = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
A = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the reference grid
A = 1
A = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the action grid
A = init[0]
A = init[1]
A = 0
A = g + heuristic[x][y] # cost from starting cell to destination cell
A = [[f, g, x, y]]
A = False # flag that is set when search is complete
A = False # flag set if we can't find expand
while not found and not resign:
if len(UpperCamelCase__ ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
A = cell.pop()
A = next_cell[2]
A = next_cell[3]
A = next_cell[1]
if x == goal[0] and y == goal[1]:
A = True
else:
for i in range(len(UpperCamelCase__ ) ): # to try out different valid actions
A = x + DIRECTIONS[i][0]
A = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCamelCase__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
A = g + cost
A = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
A = 1
A = i
A = []
A = goal[0]
A = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
A = x - DIRECTIONS[action[x][y]][0]
A = y - DIRECTIONS[action[x][y]][1]
A = xa
A = ya
invpath.append([x, y] )
A = []
for i in range(len(UpperCamelCase__ ) ):
path.append(invpath[len(UpperCamelCase__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
UpperCamelCase : Any = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCamelCase : List[Any] = [0, 0]
# all coordinates are given in format [y,x]
UpperCamelCase : int = [len(grid) - 1, len(grid[0]) - 1]
UpperCamelCase : Tuple = 1
# the cost map which pushes the path closer to the goal
UpperCamelCase : Union[str, Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCamelCase : List[str] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCamelCase : Dict = 99
UpperCamelCase , UpperCamelCase : Optional[Any] = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
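# Hand-checkable example of the heuristic map constructed above: Manhattan
# distance to the goal, with a 99 penalty on obstacle cells (self-contained
# sketch, since the module-level names are mangled in this corpus):
demo_grid = [[0, 1, 0], [0, 0, 0]]
demo_goal = [1, 2]
demo_heuristic = [
    [abs(i - demo_goal[0]) + abs(j - demo_goal[1]) if demo_grid[i][j] == 0 else 99 for j in range(3)]
    for i in range(2)
]
assert demo_heuristic == [[3, 99, 1], [2, 1, 0]]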
| 690 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : List[Any] = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
UpperCamelCase : Any = {"mobilebert-uncased": 512}
UpperCamelCase : Any = {}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = MobileBertTokenizer
def __init__( self : Optional[int] , _lowercase : Optional[int]=None , _lowercase : Any=None , _lowercase : Optional[int]=True , _lowercase : int="[UNK]" , _lowercase : Dict="[SEP]" , _lowercase : Any="[PAD]" , _lowercase : str="[CLS]" , _lowercase : Union[str, Any]="[MASK]" , _lowercase : List[Any]=True , _lowercase : Any=None , **_lowercase : Optional[Any] , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
A = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowercase ) != tokenize_chinese_chars
):
A = getattr(_lowercase , normalizer_state.pop('type' ) )
A = do_lower_case
A = strip_accents
A = tokenize_chinese_chars
A = normalizer_class(**_lowercase )
A = do_lower_case
def __a ( self : List[Any] , _lowercase : Tuple , _lowercase : Any=None ):
A = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Dict , _lowercase : str , _lowercase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
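# Usage sketch (assumes hub access; the class is exposed in transformers as
# MobileBertTokenizerFast):
from transformers import MobileBertTokenizerFast

tok = MobileBertTokenizerFast.from_pretrained('google/mobilebert-uncased')
enc = tok('first sentence', 'second sentence')
# create_token_type_ids_from_sequences above marks the second segment with 1s
assert set(enc['token_type_ids']) == {0, 1}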
| 690 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : int = {"vocab_file": "sentencepiece.model"}
UpperCamelCase : Union[str, Any] = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
UpperCamelCase : Union[str, Any] = {
"google/rembert": 256,
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Dict=True , _lowercase : List[str]=True , _lowercase : int="[CLS]" , _lowercase : str="[SEP]" , _lowercase : List[str]="[UNK]" , _lowercase : List[Any]="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : List[str]="[CLS]" , _lowercase : Any="[MASK]" , **_lowercase : Optional[Any] , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def __a ( self : Tuple ):
return len(self.sp_model )
def __a ( self : List[str] ):
A = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : List[str] , _lowercase : int ):
A = d
A = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __a ( self : Dict , _lowercase : Union[str, Any] , _lowercase : Dict=False ):
A = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def __a ( self : Dict , _lowercase : Tuple ):
return self.sp_model.PieceToId(_lowercase )
def __a ( self : str , _lowercase : Optional[int] ):
return self.sp_model.IdToPiece(_lowercase )
def __a ( self : Optional[int] , _lowercase : Optional[int] ):
A = self.sp_model.decode_pieces(_lowercase )
return out_string
def __a ( self : Optional[int] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def __a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('Vocabulary path ({}) should be a directory'.format(_lowercase ) )
return
A = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
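# Usage sketch (assumes hub access and the sentencepiece package; the slow
# tokenizer above is exposed in transformers as RemBertTokenizer):
from transformers import RemBertTokenizer

tok = RemBertTokenizer.from_pretrained('google/rembert')
ids = tok('Hello world')['input_ids']
# build_inputs_with_special_tokens above wraps a single sequence as CLS ... SEP
assert ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id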
| 690 | 1 |
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> np.ndarray:
"""simple docstring"""
A = math.sqrt(UpperCamelCase__ )
A = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> np.ndarray:
"""simple docstring"""
A = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> np.ndarray:
"""simple docstring"""
A = np.zeros((kernel_size, kernel_size) )
for i in range(0 , UpperCamelCase__ ):
for j in range(0 , UpperCamelCase__ ):
A = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> np.ndarray:
"""simple docstring"""
A = np.zeros(img.shape )
A = get_gauss_kernel(UpperCamelCase__ , UpperCamelCase__ )
A , A = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
A = get_slice(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = img_s - img_s[kernel_size // 2, kernel_size // 2]
A = vec_gaussian(UpperCamelCase__ , UpperCamelCase__ )
A = np.multiply(UpperCamelCase__ , UpperCamelCase__ )
A = np.multiply(UpperCamelCase__ , UpperCamelCase__ )
A = np.sum(UpperCamelCase__ ) / np.sum(UpperCamelCase__ )
A = val
return imga
def __snake_case ( UpperCamelCase__ ) -> tuple:
"""simple docstring"""
A = args[1] if args[1:] else '../image_data/lena.jpg'
A = float(args[2] ) if args[2:] else 1.0
A = float(args[3] ) if args[3:] else 1.0
if args[4:]:
A = int(args[4] )
A = kernel_size + abs(kernel_size % 2 - 1 )
else:
A = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = parse_args(sys.argv)
UpperCamelCase : Dict = cva.imread(filename, 0)
cva.imshow("input image", img)
UpperCamelCase : int = img / 255
UpperCamelCase : str = out.astype("float32")
UpperCamelCase : Optional[Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
UpperCamelCase : Optional[Any] = out * 255
UpperCamelCase : Tuple = np.uinta(out)
cva.imshow("output image", out)
cva.waitKey(0)
cva.destroyAllWindows()
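# De-obfuscated sketch of the spatial kernel built by get_gauss_kernel above:
# per-cell distance from the kernel centre pushed through a Gaussian. Note the
# original parametrises by variance (sigma = sqrt(variance)); this sketch takes
# sigma directly.
import math
import numpy as np

def gauss_kernel(kernel_size: int, sigma: float) -> np.ndarray:
    half = kernel_size // 2
    dist = np.array(
        [[math.hypot(i - half, j - half) for j in range(kernel_size)] for i in range(kernel_size)]
    )
    return (1 / (sigma * math.sqrt(2 * math.pi))) * np.exp(-0.5 * (dist / sigma) ** 2)

kernel = gauss_kernel(5, 1.0)
assert kernel.shape == (5, 5) and kernel.argmax() == 12  # peak at the centre cell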
| 690 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : List[Any] = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
UpperCamelCase : Any = {"mobilebert-uncased": 512}
UpperCamelCase : Any = {}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = MobileBertTokenizer
def __init__( self : Optional[int] , _lowercase : Optional[int]=None , _lowercase : Any=None , _lowercase : Optional[int]=True , _lowercase : int="[UNK]" , _lowercase : Dict="[SEP]" , _lowercase : Any="[PAD]" , _lowercase : str="[CLS]" , _lowercase : Union[str, Any]="[MASK]" , _lowercase : List[Any]=True , _lowercase : Any=None , **_lowercase : Optional[Any] , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
A = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowercase ) != tokenize_chinese_chars
):
A = getattr(_lowercase , normalizer_state.pop('type' ) )
A = do_lower_case
A = strip_accents
A = tokenize_chinese_chars
A = normalizer_class(**_lowercase )
A = do_lower_case
def __a ( self : List[Any] , _lowercase : Tuple , _lowercase : Any=None ):
A = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Dict , _lowercase : str , _lowercase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 690 | 1 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
UpperCamelCase : Optional[Any] = "docs/source/en/_toctree.yml"
def __snake_case ( UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
A = defaultdict(UpperCamelCase__ )
A = []
A = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(UpperCamelCase__ )
A = new_doc_list
A = [key for key, value in counts.items() if value > 1]
A = []
for duplicate_key in duplicates:
A = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(UpperCamelCase__ ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
A = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(UpperCamelCase__ ) > 1:
        raise ValueError('The doc list has two \'overview\' docs, which is not allowed.' )
    overview_doc.extend(UpperCamelCase__ )
    # return the overview doc(s) followed by the sorted remainder
    return overview_doc
def __snake_case ( UpperCamelCase__=False ) -> Dict:
"""simple docstring"""
with open(UpperCamelCase__ , encoding='utf-8' ) as f:
A = yaml.safe_load(f.read() )
# Get to the API doc
A = 0
while content[api_idx]["title"] != "API":
api_idx += 1
A = content[api_idx]['sections']
# Then to the model doc
A = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
A = api_doc[scheduler_idx]['sections']
A = clean_doc_toc(UpperCamelCase__ )
A = False
if new_scheduler_doc != scheduler_doc:
A = True
if overwrite:
A = new_scheduler_doc
if diff:
if overwrite:
A = api_doc
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(UpperCamelCase__ , allow_unicode=UpperCamelCase__ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def __snake_case ( UpperCamelCase__=False ) -> Dict:
"""simple docstring"""
with open(UpperCamelCase__ , encoding='utf-8' ) as f:
A = yaml.safe_load(f.read() )
# Get to the API doc
A = 0
while content[api_idx]["title"] != "API":
api_idx += 1
A = content[api_idx]['sections']
# Then to the model doc
A = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
A = False
A = api_doc[pipeline_idx]['sections']
A = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
A = pipeline_doc['section']
A = clean_doc_toc(UpperCamelCase__ )
if overwrite:
A = new_sub_pipeline_doc
new_pipeline_docs.append(UpperCamelCase__ )
# sort overall pipeline doc
A = clean_doc_toc(UpperCamelCase__ )
if new_pipeline_docs != pipeline_docs:
A = True
if overwrite:
A = new_pipeline_docs
if diff:
if overwrite:
A = api_doc
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(UpperCamelCase__ , allow_unicode=UpperCamelCase__ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
UpperCamelCase : Optional[Any] = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
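# Hand-checkable example of what clean_doc_toc (name-mangled above) produces:
# duplicate 'local' keys collapse to one entry, 'Overview' floats to the front,
# and the remainder is sorted by title.
demo_doc_list = [
    {'local': 'ddim', 'title': 'DDIM'},
    {'local': 'overview', 'title': 'Overview'},
    {'local': 'ddim', 'title': 'DDIM'},
    {'local': 'adapter', 'title': 'Adapter'},
]
# expected: [{'local': 'overview', 'title': 'Overview'},
#            {'local': 'adapter', 'title': 'Adapter'},
#            {'local': 'ddim', 'title': 'DDIM'}]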
| 690 |
"""simple docstring"""
def __snake_case ( UpperCamelCase__ ) -> list[int]:
"""simple docstring"""
A = [0 for i in range(len(UpperCamelCase__ ) )]
# initialize interval's left pointer and right pointer
A , A = 0, 0
for i in range(1 , len(UpperCamelCase__ ) ):
# case when current index is inside the interval
if i <= right_pointer:
A = min(right_pointer - i + 1 , z_result[i - left_pointer] )
A = min_edge
while go_next(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
A , A = i, i + z_result[i] - 1
return z_result
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> bool:
"""simple docstring"""
return i + z_result[i] < len(UpperCamelCase__ ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
A = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
A = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(UpperCamelCase__ ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
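# De-obfuscated, runnable rendition of the Z-function above for reference:
def z_function(s: str) -> list[int]:
    z = [0] * len(s)
    left, right = 0, 0
    for i in range(1, len(s)):
        if i <= right:
            z[i] = min(right - i + 1, z[i - left])
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > right:
            left, right = i, i + z[i] - 1
    return z

# z[i] is the length of the longest prefix of s that also starts at position i
assert z_function('abracadabra') == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
# the pattern-counting trick above: 'abr' occurs twice in 'abracadabra'
assert sum(v >= 3 for v in z_function('abr' + 'abracadabra')) == 2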
| 690 | 1 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCamelCase : str = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase : Optional[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
UpperCamelCase : Tuple = spec.loader.load_module()
UpperCamelCase : str = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCamelCase : List[str] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
UpperCamelCase : int = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def __snake_case ( ) -> Union[str, Any]:
"""simple docstring"""
A = []
for config_class in list(CONFIG_MAPPING.values() ):
A = False
# source code of `config_class`
A = inspect.getsource(UpperCamelCase__ )
A = _re_checkpoint.findall(UpperCamelCase__ )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
A , A = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
A = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
A = True
break
A = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
A = '\n'.join(sorted(UpperCamelCase__ ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
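# Quick check of the checkpoint-link regex used above:
import re

_re_demo = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
matches = _re_demo.findall('[bert-base-uncased](https://huggingface.co/bert-base-uncased)')
assert matches == [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]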
| 690 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = LDMTextToImagePipeline
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase = False
def __a ( self : Dict ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
A = CLIPTextModel(_lowercase )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __a ( self : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any]=0 ):
if str(_lowercase ).startswith('mps' ):
A = torch.manual_seed(_lowercase )
else:
A = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Any ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = LDMTextToImagePipeline(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_dummy_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
A = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : int , _lowercase : List[Any] , _lowercase : int=torch.floataa , _lowercase : int=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Union[str, Any] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
A = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
A = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Tuple=torch.floataa , _lowercase : Optional[Any]=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : List[str] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images[0]
A = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
A = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
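# Minimal inference sketch for the pipeline under test (assumes diffusers and
# torch are installed, hub access, and enough memory for the full checkpoint):
from diffusers import LDMTextToImagePipeline

pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256')
image = pipe(
    'A painting of a squirrel eating a burger', num_inference_steps=50, guidance_scale=6.0
).images[0]
image.save('squirrel.png')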
| 690 | 1 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : List[Any] = logging.get_logger(__name__)
UpperCamelCase : int = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
UpperCamelCase : Dict = {
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
UpperCamelCase : Optional[int] = {
"vinai/phobert-base": 256,
"vinai/phobert-large": 256,
}
def __snake_case ( UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
A = set()
A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A = char
A = set(UpperCamelCase__ )
return pairs
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , _lowercase : List[Any] , _lowercase : int , _lowercase : Union[str, Any]="<s>" , _lowercase : Dict="</s>" , _lowercase : Dict="</s>" , _lowercase : List[Any]="<s>" , _lowercase : Optional[Any]="<unk>" , _lowercase : Any="<pad>" , _lowercase : int="<mask>" , **_lowercase : Union[str, Any] , ):
super().__init__(
bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , **_lowercase , )
A = vocab_file
A = merges_file
A = {}
A = 0
A = 1
A = 2
A = 3
self.add_from_file(_lowercase )
A = {v: k for k, v in self.encoder.items()}
with open(_lowercase , encoding='utf-8' ) as merges_handle:
A = merges_handle.read().split('\n' )[:-1]
A = [tuple(merge.split()[:-1] ) for merge in merges]
A = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
A = {}
def __a ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A = [self.cls_token_id]
A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def __a ( self : Dict , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __a ( self : Tuple ):
return len(self.encoder )
def __a ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : List[str] , _lowercase : Optional[Any] ):
if token in self.cache:
return self.cache[token]
A = tuple(_lowercase )
A = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
A = get_pairs(_lowercase )
if not pairs:
return token
while True:
A = min(_lowercase , key=lambda _lowercase : self.bpe_ranks.get(_lowercase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
A , A = bigram
A = []
A = 0
while i < len(_lowercase ):
try:
A = word.index(_lowercase , _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A = tuple(_lowercase )
A = new_word
if len(_lowercase ) == 1:
break
else:
A = get_pairs(_lowercase )
A = '@@ '.join(_lowercase )
A = word[:-4]
A = word
return word
    def _tokenize(self, text):
        """Split on whitespace, then BPE-encode each token."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join tokens back into a string, undoing the '@@ ' continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
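    # Illustrative round trip (the subwords are hypothetical; actual splits
    # depend on the merges file): if bpe("hello") returns "hel@@ lo" and
    # bpe("world") returns "wor@@ ld", then
    #   _tokenize("hello world")  -> ["hel@@", "lo", "wor@@", "ld"]
    #   convert_tokens_to_string(["hel@@", "lo", "wor@@", "ld"]) -> "hello world"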
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """Load a fairseq-style dictionary file ('<token> <count>' per line) into the vocab."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
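
# Minimal usage sketch (hypothetical local files; a real checkpoint ships a
# vocab file of "<token> <count>" lines plus a BPE merges file):
#
#   tokenizer = PhobertTokenizer(vocab_file="vocab.txt", merges_file="bpe.codes")
#   tokens = tokenizer.tokenize("xin chào thế giới")
#   ids = tokenizer.convert_tokens_to_ids(tokens)
#   text = tokenizer.convert_tokens_to_string(tokens)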
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
A = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase , cache_dir=_lowercase )
A = [t[-1] for t in os.walk(os.path.join(_lowercase , os.listdir(_lowercase )[0] , 'snapshots' ) )]
A = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_stable_diffusion_tiny_pipe(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2383808.2) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        # Same as above, but the default safety checker stays enabled.
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        image_slice_eff = images_eff[2, 0, 256, 10:17, 1]  # fixed: slice the memory-efficient output

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
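
# These are slow integration tests; under the upstream diffusers layout they
# would typically be run with something like (file path assumed):
#
#   RUN_SLOW=1 python -m pytest tests/test_pipelines_flax.py -k FlaxStableDiffusion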