| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
"""simple docstring"""
def _snake_case ( lowercase__ , lowercase__ = False ):
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
_lowerCamelCase : List[Any] = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
_lowerCamelCase : Optional[int] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_UpperCAmelCase , 1 ):
if n < _p:
# then we have our last prime to check
_lowerCamelCase : Any = primes[:idx]
break
_lowerCamelCase : Any = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
_lowerCamelCase : Optional[int] = False
for r in range(_UpperCAmelCase ):
_lowerCamelCase : str = pow(_UpperCAmelCase , d * 2**r , _UpperCAmelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
_lowerCamelCase : Union[str, Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def _snake_case ( ):
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin() | 350 |
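# Extra spot checks (illustrative additions, not from the original suite):
#
#     assert miller_rabin(97) and not miller_rabin(91)  # 91 = 7 * 13
#     assert not miller_rabin(561)                      # Carmichael number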
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowercase__ = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
lowercase__ = """zero2"""
lowercase__ = """zero3"""
lowercase__ = [ZEROa, ZEROa]
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
_lowerCamelCase : List[str] = parameterized.to_safe_name('_'.join(str(lowercase__ ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
lowercase__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
def A_ ( self , lowercase ):
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = True , lowercase = True , lowercase = True , ):
_lowerCamelCase : List[str] = models[model]
_lowerCamelCase : Optional[int] = self.run_trainer(
stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
self.do_checks(lowercase )
return output_dir
def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = 1 , lowercase = True , lowercase = True , ):
_lowerCamelCase : List[str] = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
_lowerCamelCase : Any = F'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_lowerCamelCase : Optional[int] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_lowerCamelCase : Optional[Any] = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_lowerCamelCase : Dict = self.get_launcher(lowercase )
_lowerCamelCase : Union[str, Any] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase , env=self.get_env() )
return output_dir
def A_ ( self , lowercase=False ):
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
_lowerCamelCase : Any = min(2 , get_gpu_count() ) if distributed else 1
return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split() | 12 | 0 |
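# Run sketch (added; hedged): with GPUs and DeepSpeed available, these slow tests
# are typically selected with pytest from a transformers checkout, e.g.
#
#     RUN_SLOW=1 pytest -k "zero2 and base" path/to/this_test_file.py
#
# The file path is an assumption for illustration; use wherever this test lives.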
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase__ = 16
lowercase__ = 32
def _snake_case ( lowercase__ , lowercase__ = 16 , lowercase__ = "bert-base-cased" ):
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(lowercase__ )
_lowerCamelCase : Optional[Any] = load_dataset('glue' , 'mrpc' )
def tokenize_function(lowercase__ ):
# max_length=None => use the model max length (it's actually the default)
_lowerCamelCase : str = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCamelCase : str = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCamelCase : List[str] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowercase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(lowercase__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
_lowerCamelCase : Optional[int] = DataLoader(
tokenized_datasets['train'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
_lowerCamelCase : Any = DataLoader(
tokenized_datasets['validation'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
model.eval()
_lowerCamelCase : Dict = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**lowercase__ )
_lowerCamelCase : Optional[int] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_lowerCamelCase : List[Any] = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
_lowerCamelCase : Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowerCamelCase : Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
_lowerCamelCase : Tuple = metric.compute()
return eval_metric["accuracy"]
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCamelCase : int = config["""lr"""]
_lowerCamelCase : Union[str, Any] = int(config['num_epochs'] )
_lowerCamelCase : Optional[int] = int(config['seed'] )
_lowerCamelCase : Union[str, Any] = int(config['batch_size'] )
_lowerCamelCase : Optional[Any] = args.model_name_or_path
set_seed(lowercase__ )
_lowerCamelCase : Dict = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCamelCase : str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
_lowerCamelCase : List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_lowerCamelCase : str = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
_lowerCamelCase : Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
_lowerCamelCase : Any = 1
_lowerCamelCase : str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_lowerCamelCase : List[str] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
_lowerCamelCase : int = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCamelCase : List[Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
_lowerCamelCase : List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : str = evaluate.load('glue' , 'mrpc' )
_lowerCamelCase : Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
_lowerCamelCase : Dict = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
_lowerCamelCase : Optional[Any] = args.resume_from_checkpoint.split('epoch_' )[1]
_lowerCamelCase : int = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_lowerCamelCase : Union[str, Any] = int(lowercase__ ) + 1
_lowerCamelCase : Optional[int] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.print('resumed checkpoint performance:' , lowercase__ )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir , f'''state_{starting_epoch-1}.json''' ) , 'r' ) as f:
_lowerCamelCase : List[str] = json.load(lowercase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_lowerCamelCase : List[Any] = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
_lowerCamelCase : Optional[int] = model(**lowercase__ )
_lowerCamelCase : Dict = outputs.loss
_lowerCamelCase : int = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_lowerCamelCase : List[str] = f'''epoch_{epoch}'''
_lowerCamelCase : Any = os.path.join(args.output_dir , lowercase__ )
accelerator.save_state(lowercase__ )
_lowerCamelCase : List[Any] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
_lowerCamelCase : Union[str, Any] = accuracy
_lowerCamelCase : Any = lr_scheduler.get_lr()[0]
_lowerCamelCase : str = optimizer.param_groups[0]["""lr"""]
_lowerCamelCase : List[Any] = epoch
_lowerCamelCase : Tuple = overall_step
accelerator.print(f'''epoch {epoch}:''' , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'''state_{epoch}.json''' ) , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
def _snake_case ( ):
_lowerCamelCase : List[Any] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=lowercase__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=lowercase__ , )
parser.add_argument(
'--output_dir' , type=lowercase__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=lowercase__ , default=lowercase__ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=lowercase__ , default=lowercase__ , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=lowercase__ , default=2 , help='Number of train epochs.' , )
_lowerCamelCase : Optional[int] = parser.parse_args()
_lowerCamelCase : List[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main() | 351 |
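# Launch sketch (added; hedged): the script is meant to be started through the
# accelerate launcher, e.g.
#
#     accelerate launch this_script.py --num_epochs 2 --output_dir ./ckpts
#     accelerate launch this_script.py --resume_from_checkpoint ./ckpts/epoch_0
#
# `this_script.py` is a placeholder for whatever this module is saved as.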
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = ["""pixel_values"""]
def __init__( self , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = 8 , **lowercase , ):
super().__init__(**lowercase )
_lowerCamelCase : Optional[Any] = do_rescale
_lowerCamelCase : Union[str, Any] = rescale_factor
_lowerCamelCase : Any = do_pad
_lowerCamelCase : Optional[int] = pad_size
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase = None ):
_lowerCamelCase, _lowerCamelCase : Tuple = get_image_size(lowercase )
_lowerCamelCase : Union[str, Any] = (old_height // size + 1) * size - old_height
_lowerCamelCase : Tuple = (old_width // size + 1) * size - old_width
return pad(lowercase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=lowercase )
def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
_lowerCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Any = do_pad if do_pad is not None else self.do_pad
_lowerCamelCase : int = pad_size if pad_size is not None else self.pad_size
_lowerCamelCase : Dict = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
_lowerCamelCase : Dict = [to_numpy_array(lowercase ) for image in images]
if do_rescale:
_lowerCamelCase : str = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_pad:
_lowerCamelCase : str = [self.pad(lowercase , size=lowercase ) for image in images]
_lowerCamelCase : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
_lowerCamelCase : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 12 | 0 |
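# Usage sketch (added; hedged -- mirrors how the processor above behaves):
#
#     import numpy as np
#
#     processor = Swin2SRImageProcessor()
#     out = processor(np.zeros((3, 17, 21), dtype=np.uint8), return_tensors="np")
#     # With pad_size=8, a 17x21 input is symmetrically padded up to 24x24,
#     # so out["pixel_values"].shape == (1, 3, 24, 24).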
"""Tests for the TransfoXL tokenizer."""
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
"""simple docstring"""
import os
import string
import sys
lowercase__ = 1 << 8
lowercase__ = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
lowercase__ = KEYMAP["""up"""]
lowercase__ = KEYMAP["""left"""]
if sys.platform == "win32":
lowercase__ = []
lowercase__ = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
lowercase__ = ord(str(i))
def _snake_case ( ):
if os.name == "nt":
import msvcrt
_lowerCamelCase : Any = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(lowercase__ ) == 0:
# Read the keystroke
_lowerCamelCase : str = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
_lowerCamelCase : List[Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
_lowerCamelCase : Union[str, Any] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(lowercase__ )
if ord(lowercase__ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
_lowerCamelCase : List[Any] = chr(KEYMAP['esc'] )
except KeyError:
_lowerCamelCase : int = cha[1]
else:
_lowerCamelCase : Optional[int] = ch.decode(lowercase__ )
else:
_lowerCamelCase : Union[str, Any] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
_lowerCamelCase : List[str] = sys.stdin.fileno()
_lowerCamelCase : Tuple = termios.tcgetattr(lowercase__ )
try:
tty.setraw(lowercase__ )
_lowerCamelCase : Optional[Any] = sys.stdin.read(1 )
finally:
termios.tcsetattr(lowercase__ , termios.TCSADRAIN , lowercase__ )
return ch
def _snake_case ( ):
_lowerCamelCase : int = get_raw_chars()
if ord(lowercase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(lowercase__ ) == KEYMAP["esc"]:
_lowerCamelCase : Union[str, Any] = get_raw_chars()
if ord(lowercase__ ) == KEYMAP["mod_int"]:
_lowerCamelCase : List[Any] = get_raw_chars()
if ord(lowercase__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowercase__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(lowercase__ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"] | 12 | 0 |
"""simple docstring"""
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
lowercase__ = get_logger()
lowercase__ = None
class lowerCAmelCase__ ( TensorFormatter[Mapping, """jax.Array""", Mapping] ):
'''simple docstring'''
def __init__( self , lowercase=None , lowercase=None , **lowercase ):
super().__init__(features=__snake_case )
import jax
from jaxlib.xla_client import Device
if isinstance(__snake_case , __snake_case ):
raise ValueError(
F'''Expected {device} to be a `str` not {type(__snake_case )}, as `jaxlib.xla_extension.Device` '''
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
_lowerCamelCase : int = device if isinstance(__snake_case , __snake_case ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCamelCase : Tuple = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'''Device with string identifier {self.device} not listed among the available '''
F'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
F'''device: {str(jax.devices()[0] )}.''' )
_lowerCamelCase : Optional[Any] = str(jax.devices()[0] )
_lowerCamelCase : List[Any] = jnp_array_kwargs
@staticmethod
def A_ ( ):
import jax
return {str(__snake_case ): device for device in jax.devices()}
def A_ ( self , lowercase ):
import jax
import jax.numpy as jnp
if isinstance(__snake_case , __snake_case ) and column:
if all(
isinstance(__snake_case , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__snake_case , axis=0 )
return column
def A_ ( self , lowercase ):
import jax
import jax.numpy as jnp
if isinstance(__snake_case , (str, bytes, type(__snake_case )) ):
return value
elif isinstance(__snake_case , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_lowerCamelCase : Optional[int] = {}
if isinstance(__snake_case , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_lowerCamelCase : Any = {'dtype': jnp.intaa}
else:
_lowerCamelCase : Optional[int] = {'dtype': jnp.intaa}
elif isinstance(__snake_case , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_lowerCamelCase : Optional[Any] = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__snake_case , PIL.Image.Image ):
_lowerCamelCase : str = np.asarray(__snake_case )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCamelCase : Any = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__snake_case , **{**default_dtype, **self.jnp_array_kwargs} )
def A_ ( self , lowercase ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__snake_case , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__snake_case , '__array__' ) and not isinstance(__snake_case , jax.Array ):
_lowerCamelCase : Dict = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__snake_case , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__snake_case ) for substruct in data_struct] )
elif isinstance(__snake_case , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__snake_case ) for substruct in data_struct] )
return self._tensorize(__snake_case )
def A_ ( self , lowercase ):
return map_nested(self._recursive_tensorize , __snake_case , map_list=__snake_case )
def A_ ( self , lowercase ):
_lowerCamelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(__snake_case )
_lowerCamelCase : int = self.python_features_decoder.decode_row(__snake_case )
return self.recursive_tensorize(__snake_case )
def A_ ( self , lowercase ):
_lowerCamelCase : Any = self.numpy_arrow_extractor().extract_column(__snake_case )
_lowerCamelCase : List[Any] = self.python_features_decoder.decode_column(__snake_case , pa_table.column_names[0] )
_lowerCamelCase : List[Any] = self.recursive_tensorize(__snake_case )
_lowerCamelCase : List[Any] = self._consolidate(__snake_case )
return column
def A_ ( self , lowercase ):
_lowerCamelCase : List[str] = self.numpy_arrow_extractor().extract_batch(__snake_case )
_lowerCamelCase : List[str] = self.python_features_decoder.decode_batch(__snake_case )
_lowerCamelCase : Tuple = self.recursive_tensorize(__snake_case )
for column_name in batch:
_lowerCamelCase : List[Any] = self._consolidate(batch[column_name] )
return batch | 353 |
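# Usage sketch (added; hedged -- in `datasets` this formatter backs
# `Dataset.with_format("jax")`):
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#     ds[0]["x"]  # a jax.Array placed on the default device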
"""simple docstring"""
from typing import Any
def _snake_case ( lowercase__ ):
if not input_list:
return []
_lowerCamelCase : Any = [input_list.count(lowercase__ ) for value in input_list]
_lowerCamelCase : Dict = max(lowercase__ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowercase__ ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod() | 12 | 0 |
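# Note (added): the list-count approach above is O(n^2). A minimal sketch of an
# O(n) alternative with collections.Counter, assuming hashable values:
#
#     from collections import Counter
#
#     def mode_fast(values: list) -> list:
#         if not values:
#             return []
#         counts = Counter(values)
#         top = max(counts.values())
#         return sorted(v for v, c in counts.items() if c == top)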
"""simple docstring"""
lowercase__ = {
"""joule""": 1.0,
"""kilojoule""": 1000,
"""megajoule""": 100_0000,
"""gigajoule""": 10_0000_0000,
"""wattsecond""": 1.0,
"""watthour""": 3600,
"""kilowatthour""": 360_0000,
"""newtonmeter""": 1.0,
"""calorie_nutr""": 4186.8,
"""kilocalorie_nutr""": 418_6800.00,
"""electronvolt""": 1.602176634E-19,
"""britishthermalunit_it""": 1055.0_5585,
"""footpound""": 1.355818,
}
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
_lowerCamelCase : Dict = (
f'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
f'''Valid values are: {', '.join(lowerCamelCase__ )}'''
)
raise ValueError(lowerCamelCase__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod() | 354 |
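# Worked example (added): 1 kilowatt-hour expressed in joules, and back again.
#
#     energy_conversion("kilowatthour", "joule", 1)      # 3600000.0
#     energy_conversion("joule", "kilowatthour", 3.6e6)  # 1.0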
"""simple docstring"""
def _snake_case ( lowercase__ ):
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
_lowerCamelCase : List[str] = len(lowercase__ )
_lowerCamelCase : List[str] = max(lowercase__ )
_lowerCamelCase : List[str] = min(lowercase__ )
# create the counting array
_lowerCamelCase : List[Any] = coll_max + 1 - coll_min
_lowerCamelCase : List[Any] = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with it's predecessors. now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1 , lowercase__ ):
_lowerCamelCase : Optional[int] = counting_arr[i] + counting_arr[i - 1]
# create the output collection
_lowerCamelCase : Dict = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , lowercase__ ) ):
_lowerCamelCase : Any = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def _snake_case ( lowercase__ ):
return "".join([chr(lowercase__ ) for i in counting_sort([ord(lowercase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
lowercase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase__ = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted)) | 12 | 0 |
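# Worked example (added): for [4, 1, 3], coll_min is 1 and the counts are
# [1, 0, 1, 1]; prefix sums give [1, 1, 2, 3], so 3 lands at index 1, then 1 at
# index 0, then 4 at index 2 -> [1, 3, 4]. Time and space are O(n + k), where
# k = max - min + 1, so the sort only pays off when the value range is small.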
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"""tokenizer_file""": """tokenizer.json"""}
lowercase__ = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class lowerCAmelCase__ ( lowercase__ ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = ['''input_ids''', '''attention_mask''']
lowerCamelCase__ = None
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="<unk>" , lowercase="<s>" , lowercase="</s>" , lowercase="<pad>" , lowercase=False , lowercase=False , **lowercase , ):
super().__init__(
_a , _a , tokenizer_file=_a , unk_token=_a , bos_token=_a , eos_token=_a , pad_token=_a , add_prefix_space=_a , clean_up_tokenization_spaces=_a , **_a , )
_lowerCamelCase : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _a ) != add_prefix_space:
_lowerCamelCase : List[Any] = getattr(_a , pre_tok_state.pop('type' ) )
_lowerCamelCase : Optional[int] = add_prefix_space
_lowerCamelCase : Tuple = pre_tok_class(**_a )
_lowerCamelCase : Optional[int] = add_prefix_space
def A_ ( self , *lowercase , **lowercase ):
_lowerCamelCase : Optional[int] = kwargs.get('is_split_into_words' , _a )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
' pretokenized inputs.' )
return super()._batch_encode_plus(*_a , **_a )
def A_ ( self , *lowercase , **lowercase ):
_lowerCamelCase : List[Any] = kwargs.get('is_split_into_words' , _a )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
' pretokenized inputs.' )
return super()._encode_plus(*_a , **_a )
def A_ ( self , lowercase , lowercase = None ):
_lowerCamelCase : Dict = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def A_ ( self , lowercase ):
_lowerCamelCase : Any = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_a , add_special_tokens=_a ) + [self.eos_token_id] )
if len(_a ) > self.model_max_length:
_lowerCamelCase : Any = input_ids[-self.model_max_length :]
return input_ids | 355 |
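# Usage sketch (added; hedged -- assumes a standard `transformers` install):
#
#     from transformers import BloomTokenizerFast
#
#     tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     tok("Hello world")["input_ids"]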
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
lowercase__ = parser.parse_args()
lowercase__ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 12 | 0 |
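# Invocation sketch (added; hedged -- the flags are the ones defined above, the
# file names are placeholders):
#
#     python convert_original_stable_diffusion_to_diffusers.py \
#         --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#         --dump_path ./stable-diffusion-v1-5 --half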
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ = ['image_processor', 'tokenizer']
lowerCamelCase__ = 'BlipImageProcessor'
lowerCamelCase__ = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self , lowercase , lowercase ):
_lowerCamelCase : Optional[Any] = False
super().__init__(lowercase , lowercase )
_lowerCamelCase : List[Any] = self.image_processor
def __call__( self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
_lowerCamelCase : List[Any] = self.tokenizer
_lowerCamelCase : int = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
return text_encoding
# add pixel_values
_lowerCamelCase : int = self.image_processor(lowercase , return_tensors=lowercase )
if text is not None:
_lowerCamelCase : Tuple = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
else:
_lowerCamelCase : Any = None
if text_encoding is not None:
encoding_image_processor.update(lowercase )
return encoding_image_processor
def A_ ( self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def A_ ( self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def A_ ( self ):
_lowerCamelCase : int = self.tokenizer.model_input_names
_lowerCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 356 |
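# Usage sketch (added; hedged -- assumes a standard `transformers` install where
# this processor is registered):
#
#     from transformers import BlipProcessor
#
#     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")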
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = (UnCLIPScheduler,)
def A_ ( self , **lowercase ):
_lowerCamelCase : Any = {
'num_train_timesteps': 1000,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**lowercase )
return config
def A_ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase )
def A_ ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowercase )
def A_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase )
def A_ ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowercase )
def A_ ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowercase )
def A_ ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowercase , prev_timestep=lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' )
_lowerCamelCase : str = scheduler_class(**lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5
def A_ ( self ):
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' )
_lowerCamelCase : int = scheduler_class(**lowercase )
_lowerCamelCase : List[str] = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5
def A_ ( self ):
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config()
_lowerCamelCase : Tuple = scheduler_class(**lowercase )
_lowerCamelCase : Union[str, Any] = scheduler.timesteps
_lowerCamelCase : Any = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
_lowerCamelCase : Optional[int] = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : Tuple = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : Optional[int] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def A_ ( self ):
_lowerCamelCase : Tuple = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Optional[Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(25 )
_lowerCamelCase : Optional[Any] = scheduler.timesteps
_lowerCamelCase : Optional[int] = self.dummy_model()
_lowerCamelCase : Any = self.dummy_sample_deter
_lowerCamelCase : str = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : List[Any] = model(lowercase , lowercase )
if i + 1 == timesteps.shape[0]:
_lowerCamelCase : Optional[int] = None
else:
_lowerCamelCase : List[str] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Union[str, Any] = scheduler.step(
lowercase , lowercase , lowercase , prev_timestep=lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : List[Any] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def A_ ( self ):
pass
def A_ ( self ):
pass | 12 | 0 |
"""simple docstring"""
def _snake_case ( lowercase__ , lowercase__ ):
assert x is not None
assert y is not None
_lowerCamelCase : Optional[int] = len(__A )
_lowerCamelCase : List[str] = len(__A )
# declaring the array for storing the dp values
_lowerCamelCase : List[str] = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
_lowerCamelCase : List[Any] = 1 if x[i - 1] == y[j - 1] else 0
_lowerCamelCase : Optional[Any] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
_lowerCamelCase : Optional[int] = ''
_lowerCamelCase, _lowerCamelCase : List[str] = m, n
while i > 0 and j > 0:
_lowerCamelCase : Tuple = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
_lowerCamelCase : Dict = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
lowercase__ = "AGGTAB"
lowercase__ = "GXTXAYB"
lowercase__ = 4
lowercase__ = "GTAB"
lowercase__ = longest_common_subsequence(a, b)
print("""len =""", ln, """, sub-sequence =""", subseq)
import doctest
doctest.testmod() | 357 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """data2vec-audio"""
def __init__( self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=16 , lowercase=19 , lowercase=5 , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=256 , lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
_lowerCamelCase : str = hidden_size
_lowerCamelCase : str = feat_extract_activation
_lowerCamelCase : Optional[Any] = list(lowercase )
_lowerCamelCase : Dict = list(lowercase )
_lowerCamelCase : Dict = list(lowercase )
_lowerCamelCase : Optional[Any] = conv_bias
_lowerCamelCase : Union[str, Any] = num_conv_pos_embeddings
_lowerCamelCase : List[Any] = num_conv_pos_embedding_groups
_lowerCamelCase : List[Any] = conv_pos_kernel_size
_lowerCamelCase : Optional[int] = len(self.conv_dim )
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Any = hidden_dropout
_lowerCamelCase : Union[str, Any] = attention_dropout
_lowerCamelCase : str = activation_dropout
_lowerCamelCase : Any = feat_proj_dropout
_lowerCamelCase : Tuple = final_dropout
_lowerCamelCase : Union[str, Any] = layerdrop
_lowerCamelCase : List[Any] = layer_norm_eps
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : Tuple = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Optional[Any] = mask_time_prob
_lowerCamelCase : List[Any] = mask_time_length
_lowerCamelCase : List[Any] = mask_time_min_masks
_lowerCamelCase : Tuple = mask_feature_prob
_lowerCamelCase : Optional[Any] = mask_feature_length
_lowerCamelCase : Dict = mask_feature_min_masks
# ctc loss
_lowerCamelCase : Tuple = ctc_loss_reduction
_lowerCamelCase : str = ctc_zero_infinity
# adapter
_lowerCamelCase : Union[str, Any] = add_adapter
_lowerCamelCase : List[Any] = adapter_kernel_size
_lowerCamelCase : Optional[Any] = adapter_stride
_lowerCamelCase : List[Any] = num_adapter_layers
_lowerCamelCase : int = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCamelCase : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCamelCase : List[str] = list(lowercase )
_lowerCamelCase : Optional[Any] = list(lowercase )
_lowerCamelCase : Any = list(lowercase )
_lowerCamelCase : Optional[Any] = xvector_output_dim
@property
def A_ ( self ):
return math.prod(self.conv_stride ) | 12 | 0 |
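# Usage sketch (added; hedged):
#
#     config = Data2VecAudioConfig(hidden_size=256, num_hidden_layers=6)
#     config.inputs_to_logits_ratio  # product of conv strides; 320 with the defaults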
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = XLMTokenizer
lowerCamelCase__ = False
def A_ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , 'w' ) as fp:
            fp.write('\n'.join(merges ) )
    def A_ ( self , tokenizer ):
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def A_ ( self ):
        tokenizer = XLMTokenizer(self.vocab_file , self.merges_file )
        text = """lower"""
        bpe_tokens = ["""low""", """er</w>"""]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ["""<unk>"""]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
    def A_ ( self ):
        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 358 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
lowercase__ = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
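# Illustrative usage sketch (not part of the original file; assumes the standard
# PipelineTool call signature). Instantiating the tool class below and calling
#   tool("Hello", src_lang="English", tgt_lang="French")
# maps both plain-English names through LANGUAGE_CODES ("eng_Latn", "fra_Latn")
# before tokenization and generation.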
class lowerCAmelCase__ ( PipelineTool ):
'''simple docstring'''
lowerCamelCase__ = """facebook/nllb-200-distilled-600M"""
lowerCamelCase__ = (
"""This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
"""be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
"""which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """
"""plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
)
lowerCamelCase__ = """translator"""
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSeqaSeqLM
lowerCamelCase__ = LANGUAGE_CODES
lowerCamelCase__ = ["""text""", """text""", """text"""]
lowerCamelCase__ = ["""text"""]
    def A_ ( self , text , src_lang , tgt_lang ):
        if src_lang not in self.lang_to_code:
            raise ValueError(F'''{src_lang} is not a supported language.''' )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F'''{tgt_lang} is not a supported language.''' )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors='pt' , src_lang=src_lang , tgt_lang=tgt_lang )
def A_ ( self , lowercase ):
return self.model.generate(**lowercase )
    def A_ ( self , outputs ):
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True ) | 12 | 0 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , backbone_featmap_shape=[1, 16, 4, 4] , scope=None , ):
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : List[str] = batch_size
_lowerCamelCase : Union[str, Any] = image_size
_lowerCamelCase : Any = patch_size
_lowerCamelCase : Any = num_channels
_lowerCamelCase : Union[str, Any] = is_training
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : Dict = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : int = attention_probs_dropout_prob
_lowerCamelCase : Tuple = type_sequence_label_size
_lowerCamelCase : Union[str, Any] = initializer_range
_lowerCamelCase : Dict = scope
_lowerCamelCase : Optional[int] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_lowerCamelCase : Any = (self.image_size // 32) ** 2
_lowerCamelCase : int = num_patches + 1
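        # Worked example with the defaults above: image_size=64 and an output stride
        # of 32 give (64 // 32) ** 2 = 4 patches, so seq_length = 4 + 1 = 5 with [CLS].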
def A_ ( self ):
_lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : str = None
if self.use_labels:
_lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def A_ ( self ):
_lowerCamelCase : Any = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowercase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__lowercase , )
    def A_ ( self , config , pixel_values , labels ):
        model = ViTHybridModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def A_ ( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A_ ( self ):
_lowerCamelCase : Tuple = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = config_and_inputs
_lowerCamelCase : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def A_ ( self ):
_lowerCamelCase : Optional[int] = ViTHybridModelTester(self )
_lowerCamelCase : Optional[int] = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def A_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def A_ ( self ):
pass
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowercase )
_lowerCamelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __lowercase )
def A_ ( self ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def A_ ( self ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : int = _config_zero_init(__lowercase )
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] = model_class(config=__lowercase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
_lowerCamelCase : Tuple = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def A_ ( self ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : str = ViTHybridModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def prepare_img( ):
_lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A_ ( self ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__lowercase )
_lowerCamelCase : str = self.default_image_processor
_lowerCamelCase : Optional[Any] = prepare_img()
_lowerCamelCase : Optional[int] = image_processor(images=__lowercase , return_tensors='pt' ).to(__lowercase )
# forward pass
with torch.no_grad():
_lowerCamelCase : List[Any] = model(**__lowercase )
# verify the logits
_lowerCamelCase : Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
_lowerCamelCase : Tuple = torch.tensor([-1.90_90, -0.49_93, -0.23_89] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) )
@slow
@require_accelerate
def A_ ( self ):
_lowerCamelCase : Tuple = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
_lowerCamelCase : List[Any] = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : List[str] = image_processor(images=__lowercase , return_tensors='pt' )
_lowerCamelCase : Dict = model(**__lowercase )
_lowerCamelCase : Union[str, Any] = outputs.logits
# model predicts one of the 1000 ImageNet classes
_lowerCamelCase : List[Any] = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' ) | 359 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def A_ ( self , model , tokenizer , processor ):
        example_video_filepath = hf_hub_download(
            repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
        video_classifier = VideoClassificationPipeline(model=model , image_processor=processor , top_k=2 )
        examples = [
            example_video_filepath,
            'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
        ]
        return video_classifier, examples
    def A_ ( self , video_classifier , examples ):
        for example in examples:
            outputs = video_classifier(example )
            self.assertEqual(
                outputs , [
                    {'score': ANY(float ), 'label': ANY(str )},
                    {'score': ANY(float ), 'label': ANY(str )},
                ] , )
@require_torch
def A_ ( self ):
        small_model = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} )
        video_classifier = pipeline(
            'video-classification' , model=small_model , feature_extractor=small_feature_extractor , frame_sampling_rate=4 )
        video_file_path = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
        outputs = video_classifier(video_file_path , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}] , )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
                [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
            ] , )
@require_tf
def A_ ( self ):
pass | 12 | 0 |
"""simple docstring"""
from typing import Any
def viterbi( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ):
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ):
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ):
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('There\'s an empty parameter' )
def _validate_lists( observations_space , states_space ):
    _validate_list(observations_space , 'observations_space' )
    _validate_list(states_space , 'states_space' )
def _validate_list( _object , var_name ):
    if not isinstance(_object , list ):
        msg = f'''{var_name} must be a list'''
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = f'''{var_name} must be a list of strings'''
                raise ValueError(msg )
def _validate_dicts( initial_probabilities , transition_probabilities , emission_probabilities , ):
    _validate_dict(initial_probabilities , 'initial_probabilities' , float )
    _validate_nested_dict(transition_probabilities , 'transition_probabilities' )
    _validate_nested_dict(emission_probabilities , 'emission_probabilities' )
def _validate_nested_dict( _object , var_name ):
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict( _object , var_name , value_type , nested = False ):
    if not isinstance(_object , dict ):
        msg = f'''{var_name} must be a dict'''
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = f'''{var_name} all keys must be strings'''
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = 'nested dictionary ' if nested else ''
        msg = f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
        raise ValueError(msg )
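# A minimal usage sketch (added for illustration; the toy HMM below is invented,
# following the classic healthy/fever example):
def _example_usage() -> list:
    observations = ['normal', 'cold', 'dizzy']
    states = ['Healthy', 'Fever']
    initial_p = {'Healthy': 0.6, 'Fever': 0.4}
    transition_p = {
        'Healthy': {'Healthy': 0.7, 'Fever': 0.3},
        'Fever': {'Healthy': 0.4, 'Fever': 0.6},
    }
    emission_p = {
        'Healthy': {'normal': 0.5, 'cold': 0.4, 'dizzy': 0.1},
        'Fever': {'normal': 0.1, 'cold': 0.3, 'dizzy': 0.6},
    }
    # Most likely hidden sequence for these numbers: ['Healthy', 'Healthy', 'Fever']
    return viterbi(observations , states , initial_p , transition_p , emission_p )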
if __name__ == "__main__":
from doctest import testmod
testmod() | 360 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase__ = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 12 | 0 |
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
lowercase__ = """bert-base-cased"""
lowercase__ = """fp16"""
lowercase__ = """bf16"""
lowercase__ = [FPaa, BFaa]
@require_fsdp
@require_cuda
class lowerCAmelCase__ ( AccelerateTestCase ):
'''simple docstring'''
def A_ ( self ):
super().setUp()
_lowerCamelCase : Union[str, Any] = dict(
ACCELERATE_USE_FSDP='true' , MASTER_ADDR='localhost' , MASTER_PORT='10999' , RANK='0' , LOCAL_RANK='0' , WORLD_SIZE='1' , )
def A_ ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
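        # FSDP_SHARDING_STRATEGY lists the strategy names in enum order; torch's
        # ShardingStrategy is a 1-indexed enum, hence the ``i + 1`` below.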
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
_lowerCamelCase : int = self.dist_env.copy()
_lowerCamelCase : str = F'''{i + 1}'''
_lowerCamelCase : Tuple = strategy
with mockenv_context(**_a ):
_lowerCamelCase : Tuple = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def A_ ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH ):
_lowerCamelCase : List[Any] = self.dist_env.copy()
_lowerCamelCase : Optional[Any] = prefetch_policy
with mockenv_context(**_a ):
_lowerCamelCase : Optional[int] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def A_ ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE ):
_lowerCamelCase : Dict = self.dist_env.copy()
_lowerCamelCase : Tuple = state_dict_type
with mockenv_context(**_a ):
_lowerCamelCase : str = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def A_ ( self ):
_lowerCamelCase : Dict = AutoModel.from_pretrained(_a )
for policy in FSDP_AUTO_WRAP_POLICY:
_lowerCamelCase : Union[str, Any] = self.dist_env.copy()
_lowerCamelCase : int = policy
if policy == "TRANSFORMER_BASED_WRAP":
_lowerCamelCase : str = 'BertLayer'
elif policy == "SIZE_BASED_WRAP":
_lowerCamelCase : Optional[int] = '2000'
with mockenv_context(**_a ):
_lowerCamelCase : List[str] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_a )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_lowerCamelCase : Any = self.dist_env.copy()
_lowerCamelCase : List[Any] = 'TRANSFORMER_BASED_WRAP'
_lowerCamelCase : str = 'T5Layer'
with mockenv_context(**_a ):
_lowerCamelCase : List[str] = FullyShardedDataParallelPlugin()
with self.assertRaises(_a ) as cm:
fsdp_plugin.set_auto_wrap_policy(_a )
self.assertTrue('Could not find the transformer layer class to wrap in the model.' in str(cm.exception ) )
_lowerCamelCase : Tuple = self.dist_env.copy()
_lowerCamelCase : Tuple = 'SIZE_BASED_WRAP'
_lowerCamelCase : str = '0'
with mockenv_context(**_a ):
_lowerCamelCase : List[str] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_a )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def A_ ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_lowerCamelCase : List[Any] = self.dist_env.copy()
_lowerCamelCase : Optional[Any] = mp_dtype
with mockenv_context(**_a ):
_lowerCamelCase : List[str] = Accelerator()
if mp_dtype == "fp16":
_lowerCamelCase : Optional[int] = torch.floataa
elif mp_dtype == "bf16":
_lowerCamelCase : Optional[int] = torch.bfloataa
_lowerCamelCase : Any = MixedPrecision(param_dtype=_a , reduce_dtype=_a , buffer_dtype=_a )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , _a )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , _a ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(_a )
def A_ ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
_lowerCamelCase : Union[str, Any] = self.dist_env.copy()
_lowerCamelCase : Optional[Any] = str(_a ).lower()
with mockenv_context(**_a ):
_lowerCamelCase : Any = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=_a ) )
@require_fsdp
@require_multi_gpu
@slow
class lowerCAmelCase__ ( TempDirTestCase ):
'''simple docstring'''
def A_ ( self ):
super().setUp()
_lowerCamelCase : Tuple = 0.82
_lowerCamelCase : Dict = [
'fsdp_shard_grad_op_transformer_based_wrap',
'fsdp_full_shard_transformer_based_wrap',
]
_lowerCamelCase : Optional[Any] = {
'multi_gpu_fp16': 3200,
'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2000,
'fsdp_full_shard_transformer_based_wrap_fp16': 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
_lowerCamelCase : Union[str, Any] = 160
_lowerCamelCase : Any = 160
_lowerCamelCase : List[Any] = inspect.getfile(accelerate.test_utils )
_lowerCamelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps'] )
def A_ ( self ):
_lowerCamelCase : int = os.path.join(self.test_scripts_folder , 'test_performance.py' )
_lowerCamelCase : List[Any] = ['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp']
for config in self.performance_configs:
_lowerCamelCase : List[Any] = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in config:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append('--mixed_precision=no' )
else:
cmd_config.append('--mixed_precision=fp16' )
if "cpu_offload" in config:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
def A_ ( self ):
_lowerCamelCase : Any = os.path.join(self.test_scripts_folder , 'test_checkpointing.py' )
_lowerCamelCase : Tuple = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
'--use_fsdp',
'--mixed_precision=fp16',
'--fsdp_transformer_layer_cls_to_wrap=BertLayer',
]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
_lowerCamelCase : Optional[Any] = cmd.copy()
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
_lowerCamelCase : str = len(_a )
for state_dict_type in FSDP_STATE_DICT_TYPE:
_lowerCamelCase : Any = cmd_config[:state_dict_config_index]
cmd_config.append(F'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
'--partial_train_epoch=1',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
_lowerCamelCase : List[str] = cmd_config[:-1]
_lowerCamelCase : Optional[int] = os.path.join(self.tmpdir , 'epoch_0' )
cmd_config.extend(
[
F'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
def A_ ( self ):
_lowerCamelCase : Any = os.path.join(self.test_scripts_folder , 'test_peak_memory_usage.py' )
_lowerCamelCase : Tuple = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
_lowerCamelCase : Union[str, Any] = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['--mixed_precision=fp16'] )
else:
cmd_config.extend(['--mixed_precision=no'] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['--use_fsdp'] )
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in spec:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
F'''--n_train={self.n_train}''',
F'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
| 361 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict( config , input_ids , attention_mask=None , head_mask=None ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
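# For example (illustrative): with pad_token_id=1 and input_ids [[5, 7, 1]], the
# derived attention_mask is [[1, 1, 0]] -- ones over real tokens, zero over padding.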
@require_tf
class lowerCAmelCase__ :
'''simple docstring'''
lowerCamelCase__ = OPTConfig
lowerCamelCase__ = {}
lowerCamelCase__ = """gelu"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , embed_dim=16 , word_embed_proj_dim=16 , ):
_lowerCamelCase : Tuple = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : str = is_training
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : List[Any] = eos_token_id
_lowerCamelCase : Tuple = pad_token_id
_lowerCamelCase : List[str] = bos_token_id
_lowerCamelCase : Optional[int] = embed_dim
_lowerCamelCase : List[str] = word_embed_proj_dim
_lowerCamelCase : Any = False
def A_ ( self ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowerCamelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCamelCase : str = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowerCamelCase : Tuple = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase , **self.config_updates , )
_lowerCamelCase : int = prepare_opt_inputs_dict(lowercase , lowercase )
return config, inputs_dict
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : Optional[Any] = TFOPTModel(config=lowercase )
_lowerCamelCase : Optional[Any] = inputs_dict['input_ids']
_lowerCamelCase : str = input_ids[:1, :]
_lowerCamelCase : Dict = inputs_dict['attention_mask'][:1, :]
_lowerCamelCase : Optional[Any] = 1
# first forward pass
_lowerCamelCase : Any = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
_lowerCamelCase, _lowerCamelCase : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowerCamelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_lowerCamelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
_lowerCamelCase : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase )[0]
_lowerCamelCase : List[str] = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowerCamelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowerCamelCase : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
_lowerCamelCase : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
@require_tf
class lowerCAmelCase__ ( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowerCamelCase__ = (TFOPTForCausalLM,) if is_tf_available() else ()
lowerCamelCase__ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = 10
def A_ ( self ):
_lowerCamelCase : int = TFOPTModelTester(self )
_lowerCamelCase : Tuple = ConfigTester(self , config_class=lowercase )
def A_ ( self ):
self.config_tester.run_common_tests()
def A_ ( self ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        def _get_word_embedding_weight(model , embedding_layer ):
            if hasattr(embedding_layer , 'weight' ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer , 'weight' ):
                    return embedding_layer.weight
                else:
                    return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_lowerCamelCase : Optional[int] = model_class(config=lowercase )
_lowerCamelCase : int = _get_word_embedding_weight(lowercase , model.get_input_embeddings() )
_lowerCamelCase : Tuple = _get_word_embedding_weight(lowercase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase )
_lowerCamelCase : str = _get_word_embedding_weight(lowercase , model.get_input_embeddings() )
_lowerCamelCase : Any = _get_word_embedding_weight(lowercase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_lowerCamelCase : Union[str, Any] = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowercase )
# check that weights remain the same after resizing
_lowerCamelCase : int = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_lowerCamelCase : Optional[Any] = False
self.assertTrue(lowercase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowercase )
_lowerCamelCase : Dict = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_lowerCamelCase : Union[str, Any] = False
self.assertTrue(lowercase )
def _long_tensor( tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = 99
def A_ ( self ):
_lowerCamelCase : Tuple = tf.ones((4, 1) , dtype=tf.intaa ) * 2
_lowerCamelCase : Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_lowerCamelCase : int = input_ids.shape[0]
_lowerCamelCase : List[Any] = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self ):
_lowerCamelCase : Tuple = TFOPTModel.from_pretrained('facebook/opt-350m' )
_lowerCamelCase : List[Any] = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_lowerCamelCase : List[str] = tf.not_equal(lowercase , model.config.pad_token_id )
with tf.GradientTape():
_lowerCamelCase : List[str] = model(input_ids=lowercase , attention_mask=lowercase ).last_hidden_state
_lowerCamelCase : Optional[Any] = (1, 11, 512)
self.assertEqual(output.shape , lowercase )
_lowerCamelCase : List[str] = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-3 ) )
_lowerCamelCase : List[str] = tf.function(lowercase , jit_compile=lowercase )
_lowerCamelCase : Union[str, Any] = xla_generate(lowercase , lowercase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-2 ) )
@require_tf
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
super().setUp()
_lowerCamelCase : List[Any] = 'facebook/opt-350m'
def A_ ( self ):
_lowerCamelCase : int = TFOPTForCausalLM.from_pretrained(self.path_model )
_lowerCamelCase : List[Any] = GPTaTokenizer.from_pretrained(self.path_model )
_lowerCamelCase : List[str] = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' , padding=lowercase , add_special_tokens=lowercase )
_lowerCamelCase : Optional[int] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_lowerCamelCase : Any = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) )
_lowerCamelCase : Tuple = tf.function(lowercase , jit_compile=lowercase )
_lowerCamelCase : List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) )
@require_tf
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def A_ ( self ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def A_ ( self ):
_lowerCamelCase : str = 'facebook/opt-125m'
_lowerCamelCase : Dict = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(lowercase )
_lowerCamelCase : Dict = TFOPTForCausalLM.from_pretrained(lowercase )
for prompt in self.prompts:
_lowerCamelCase : int = tokenizer(lowercase , return_tensors='tf' ).input_ids
_lowerCamelCase : int = model.generate(lowercase , max_length=10 )
_lowerCamelCase : Any = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
predicted_outputs += generated_string
self.assertListEqual(lowercase , lowercase )
def A_ ( self ):
_lowerCamelCase : List[Any] = 'facebook/opt-350m'
_lowerCamelCase : int = GPTaTokenizer.from_pretrained(lowercase )
_lowerCamelCase : Optional[int] = TFOPTForCausalLM.from_pretrained(lowercase )
_lowerCamelCase : Any = 'left'
# use different length sentences to test batching
_lowerCamelCase : Optional[int] = [
'Hello, my dog is a little',
'Today, I',
]
_lowerCamelCase : Dict = tokenizer(lowercase , return_tensors='tf' , padding=lowercase )
_lowerCamelCase : int = inputs['input_ids']
_lowerCamelCase : Tuple = model.generate(input_ids=lowercase , attention_mask=inputs['attention_mask'] )
_lowerCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
_lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase )
_lowerCamelCase : Dict = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
_lowerCamelCase : int = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
_lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings )
_lowerCamelCase : List[Any] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
_lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase )
_lowerCamelCase : Optional[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase )
_lowerCamelCase : Optional[Any] = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] )
def A_ ( self ):
_lowerCamelCase : Tuple = 'facebook/opt-350m'
_lowerCamelCase : List[Any] = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Optional[Any] = GPTaTokenizer.from_pretrained(lowercase )
_lowerCamelCase : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase )
for prompt in self.prompts:
_lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' ).input_ids
_lowerCamelCase : Optional[Any] = model.generate(lowercase , max_length=10 )
_lowerCamelCase : Dict = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
predicted_outputs += generated_string
self.assertListEqual(lowercase , lowercase ) | 12 | 0 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowercase__ = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def A_ ( self , directory , identifier = None , n_identifier = None , ignore_files = None , only_modules = True , ):
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py' )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing' , file )
            if only_modules:
                module_identifier = file.split('.' )[0]
                try:
                    module = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(F'''{module_identifier} is not a module.''' )
            else:
                result = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def A_ ( self ):
        directory = Path('src/transformers' )
        identifier = """modeling"""
        ignore_files = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )
    def A_ ( self ):
        directory = Path('src/transformers' )
        identifier = """tokenization"""
        self.analyze_directory(directory , identifier=identifier )
    def A_ ( self ):
        directory = Path('src/transformers' )
        identifier = """configuration"""
        self.analyze_directory(directory , identifier=identifier )
    def A_ ( self ):
        directory = Path('src/transformers' )
        n_identifiers = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(directory , n_identifier=n_identifiers )
    def A_ ( self ):
        directory = Path('docs/source' )
        ignore_files = ["""favicon.ico"""]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False ) | 362 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class lowerCAmelCase__ ( PipelineTool ):
'''simple docstring'''
lowerCamelCase__ = """philschmid/bart-large-cnn-samsum"""
lowerCamelCase__ = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
lowerCamelCase__ = """summarizer"""
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSeqaSeqLM
lowerCamelCase__ = ["""text"""]
lowerCamelCase__ = ["""text"""]
    def A_ ( self , text ):
        return self.pre_processor(text , return_tensors='pt' , truncation=True )
def A_ ( self , lowercase ):
return self.model.generate(**lowercase )[0]
def A_ ( self , lowercase ):
return self.pre_processor.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) | 12 | 0 |
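# Hedged usage sketch: `PipelineTool` subclasses are directly callable. The class
# name below is assumed, since the original identifier is obfuscated above.
tool = TextSummarizationTool()
print(tool("Hugging Face's Transformers library provides thousands of pretrained models ..."))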
"""simple docstring"""
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowercase__ = """pytorch_model.bin"""
lowercase__ = """pytorch_model.bin.index.json"""
lowercase__ = """adapter_config.json"""
lowercase__ = """adapter_model.bin"""
lowercase__ = """adapter_model.safetensors"""
lowercase__ = """tf_model.h5"""
lowercase__ = """tf_model.h5.index.json"""
lowercase__ = """model.ckpt"""
lowercase__ = """flax_model.msgpack"""
lowercase__ = """flax_model.msgpack.index.json"""
lowercase__ = """model.safetensors"""
lowercase__ = """model.safetensors.index.json"""
lowercase__ = """config.json"""
lowercase__ = """preprocessor_config.json"""
lowercase__ = FEATURE_EXTRACTOR_NAME
lowercase__ = """generation_config.json"""
lowercase__ = """modelcard.json"""
lowercase__ = """▁"""
lowercase__ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowercase__ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowercase__ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowercase__ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def _snake_case ( lowercase__ ):
    if version.parse(__version__ ) < version.parse(min_version ):
if "dev" in min_version:
_lowerCamelCase : List[Any] = (
'This example requires a source install from HuggingFace Transformers (see '
'`https://huggingface.co/docs/transformers/installation#install-from-source`),'
)
else:
_lowerCamelCase : str = f'''This example requires a minimum version of {min_version},'''
error_message += f''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' ) | 363 |
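# Usage note (illustrative): example scripts call this version guard once at import
# time, mirroring `transformers.utils.check_min_version`; the public name below is
# assumed, as the function above is obfuscated to `_snake_case`.
check_min_version("4.21.0.dev0")  # raises ImportError on an older transformers install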
"""simple docstring"""
from __future__ import annotations
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : Tuple = list(range(len(lowercase__ ) ) )
_lowerCamelCase : Any = [v / w for v, w in zip(lowercase__ , lowercase__ )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
_lowerCamelCase : float = 0
_lowerCamelCase : list[float] = [0] * len(lowercase__ )
for i in index:
if weight[i] <= capacity:
_lowerCamelCase : int = 1
max_value += value[i]
capacity -= weight[i]
else:
_lowerCamelCase : Any = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod() | 12 | 0 |
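# Worked example for the fractional-knapsack routine above (its original name is
# obfuscated to `_snake_case`; `frac_knapsack(value, weight, capacity)` is assumed).
# Ratios are 6, 5 and 4, so items are taken greedily in that order:
values, weights = [60, 100, 120], [10, 20, 30]
max_value, fractions = frac_knapsack(values, weights, 50)
assert max_value == 240.0        # 60 + 100 + 120 * (20 / 30)
assert fractions[-1] == 2 / 3    # only two thirds of the last item fits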
"""simple docstring"""
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase ):
_lowerCamelCase : Any = n
_lowerCamelCase : Any = [None] * self.n
_lowerCamelCase : Tuple = 0 # index of the first element
        _lowerCamelCase : str = 0 # index one past the last element (next insertion slot)
        _lowerCamelCase : List[Any] = 0 # number of elements currently stored
def __len__( self ):
return self.size
def A_ ( self ):
return self.size == 0
def A_ ( self ):
return False if self.is_empty() else self.array[self.front]
def A_ ( self , lowercase ):
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
_lowerCamelCase : Union[str, Any] = data
_lowerCamelCase : Optional[int] = (self.rear + 1) % self.n
self.size += 1
return self
def A_ ( self ):
if self.size == 0:
raise Exception('UNDERFLOW' )
_lowerCamelCase : Optional[int] = self.array[self.front]
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Tuple = (self.front + 1) % self.n
self.size -= 1
return temp
| 364 |
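# Illustrative exercise of the ring buffer above; the public names used below
# (`CircularQueue`, `enqueue`, `dequeue`, `first`) are assumed, since the original
# class and method identifiers are obfuscated.
q = CircularQueue(3)
q.enqueue(10).enqueue(20)          # enqueue returns self, so calls chain
assert len(q) == 2 and q.first() == 10
assert q.dequeue() == 10 and q.first() == 20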
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowercase__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
lowercase__ = []
lowercase__ = []
lowercase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowercase__ = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
"""emoji""": True,
},
}
]
lowercase__ = 0
for log in Path().glob("""*.log"""):
lowercase__ = 0
with open(log, """r""") as f:
for line in f:
lowercase__ = json.loads(line)
if line.get("""nodeid""", """""") != "":
lowercase__ = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowercase__ = F"{line['duration']:.4f}"
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowercase__ = []
log.unlink()
lowercase__ = """"""
lowercase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowercase__ = []
lowercase__ = {}
for test in failed_tests:
lowercase__ = test[0].split("""::""")
lowercase__ = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowercase__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowercase__ = [test[0] for test in failed_table]
lowercase__ = list(set(files))
# Count number of instances in failed_tests
lowercase__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowercase__ = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowercase__ = """Too many failed tests, please see the full report in the Action results."""
lowercase__ = len(err) + 10
lowercase__ = message[: 3000 - offset] + F"\n...\n```\n{err}"
print(F"### {message}")
else:
lowercase__ = """No failed tests! 🤗"""
print(F"## {message}")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowercase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowercase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
lowercase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
lowercase__ = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
}
],
}
payload.append(date_report)
lowercase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowercase__ = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowercase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowercase__ = row[0]
else:
lowercase__ = """"""
lowercase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
) | 12 | 0 |
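# Quick sketch of the custom `TableFormat` defined at the top of this script,
# reusing the `hf_table_format` object referenced above: it renders a
# pipe-delimited, Slack-friendly markdown table with no rule lines.
from tabulate import tabulate
rows = [["tests/test_metrics.py", 2], ["tests/test_cli.py", 1]]
print(tabulate(rows, headers=["Test Location", "Num Failed"], tablefmt=hf_table_format))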
"""simple docstring"""
def _snake_case ( lowercase__ = 100 ):
_lowerCamelCase : str = n * (n + 1) * (2 * n + 1) / 6
_lowerCamelCase : int = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"{solution() = }") | 365 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """AutoTokenizer"""
lowerCamelCase__ = ["""tokenizer"""]
lowerCamelCase__ = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__( self , lowercase , lowercase=None ):
super().__init__(lowercase )
_lowerCamelCase : Optional[int] = speaker_embeddings
@classmethod
def A_ ( cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ):
if speaker_embeddings_dict_path is not None:
_lowerCamelCase : Optional[Any] = get_file_from_repo(
lowercase , lowercase , subfolder=kwargs.pop('subfolder' , lowercase ) , cache_dir=kwargs.pop('cache_dir' , lowercase ) , force_download=kwargs.pop('force_download' , lowercase ) , proxies=kwargs.pop('proxies' , lowercase ) , resume_download=kwargs.pop('resume_download' , lowercase ) , local_files_only=kwargs.pop('local_files_only' , lowercase ) , use_auth_token=kwargs.pop('use_auth_token' , lowercase ) , revision=kwargs.pop('revision' , lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
                    F'''`{os.path.join(lowercase , lowercase )}` does not exist
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
_lowerCamelCase : List[Any] = None
else:
with open(lowercase ) as speaker_embeddings_json:
_lowerCamelCase : Union[str, Any] = json.load(lowercase )
else:
_lowerCamelCase : Tuple = None
_lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase , **lowercase )
return cls(tokenizer=lowercase , speaker_embeddings=lowercase )
def A_ ( self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , 'v2' ) , exist_ok=lowercase )
_lowerCamelCase : int = {}
_lowerCamelCase : List[Any] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_lowerCamelCase : Optional[Any] = self._load_voice_preset(lowercase )
_lowerCamelCase : Any = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , lowercase , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=lowercase , )
_lowerCamelCase : List[str] = os.path.join(lowercase , F'''{prompt_key}_{key}.npy''' )
_lowerCamelCase : Optional[Any] = tmp_dict
with open(os.path.join(lowercase , lowercase ) , 'w' ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
def A_ ( self , lowercase = None , **lowercase ):
_lowerCamelCase : Tuple = self.speaker_embeddings[voice_preset]
_lowerCamelCase : Any = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
_lowerCamelCase : Union[str, Any] = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , lowercase ) , cache_dir=kwargs.pop('cache_dir' , lowercase ) , force_download=kwargs.pop('force_download' , lowercase ) , proxies=kwargs.pop('proxies' , lowercase ) , resume_download=kwargs.pop('resume_download' , lowercase ) , local_files_only=kwargs.pop('local_files_only' , lowercase ) , use_auth_token=kwargs.pop('use_auth_token' , lowercase ) , revision=kwargs.pop('revision' , lowercase ) , )
if path is None:
raise ValueError(
                    F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.''' )
_lowerCamelCase : List[str] = np.load(lowercase )
return voice_preset_dict
def A_ ( self , lowercase = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ):
if voice_preset is not None and not isinstance(lowercase , lowercase ):
if (
isinstance(lowercase , lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_lowerCamelCase : Any = self._load_voice_preset(lowercase )
else:
if isinstance(lowercase , lowercase ) and not voice_preset.endswith('.npz' ):
_lowerCamelCase : Optional[Any] = voice_preset + '.npz'
_lowerCamelCase : Union[str, Any] = np.load(lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(lowercase , **lowercase )
_lowerCamelCase : Tuple = BatchFeature(data=lowercase , tensor_type=lowercase )
_lowerCamelCase : Any = self.tokenizer(
lowercase , return_tensors=lowercase , padding='max_length' , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , )
if voice_preset is not None:
_lowerCamelCase : Optional[int] = voice_preset
return encoded_text | 12 | 0 |
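# Hedged usage sketch for the processor above (publicly Bark's `BarkProcessor`);
# the checkpoint and voice-preset names below follow the Bark documentation:
from transformers import BarkProcessor
processor = BarkProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")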
"""simple docstring"""
def _snake_case ( lowercase__ ):
return " ".join(
        ''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw""")) | 366 |
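# For reference, the demo call above prints "Hey fellow warriors": words longer
# than four characters are reversed in place, shorter ones pass through unchanged.
assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"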
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowercase__ = False
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : Tuple = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
_lowerCamelCase : Dict = torch.manual_seed(0 )
_lowerCamelCase : Dict = pipe(
image=lowercase , generator=lowercase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
_lowerCamelCase : str = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCamelCase : Any = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 12 | 0 |
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def _snake_case ( lowercase__ ):
return 1 / (1 + np.exp(-z ))
def _snake_case ( lowercase__ , lowercase__ ):
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
    _lowerCamelCase : Dict = np.dot(x , theta )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__=70000 ):
_lowerCamelCase : Union[str, Any] = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        _lowerCamelCase : Tuple = np.dot(x , theta )
        _lowerCamelCase : str = sigmoid_function(z )
        _lowerCamelCase : Tuple = np.dot(x.T , h - y ) / y.size
        _lowerCamelCase : Optional[int] = theta - alpha * gradient # updating the weights
        _lowerCamelCase : List[Any] = np.dot(x , theta )
        _lowerCamelCase : int = sigmoid_function(z )
        _lowerCamelCase : str = cost_function(h , y )
if iterations % 100 == 0:
print(f'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
lowercase__ = datasets.load_iris()
lowercase__ = iris.data[:, :2]
lowercase__ = (iris.target != 0) * 1
lowercase__ = 0.1
lowercase__ = logistic_reg(alpha, x, y, max_iterations=7_0000)
print("""theta: """, theta) # printing the theta i.e our weights vector
def _snake_case ( lowercase__ ):
return sigmoid_function(
        np.dot(lowercase__ , theta ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
(lowercase__) = (x[:, 0].min(), x[:, 0].max())
(lowercase__) = (x[:, 1].min(), x[:, 1].max())
(lowercase__) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
lowercase__ = np.c_[xxa.ravel(), xxa.ravel()]
lowercase__ = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="""black""")
plt.legend()
plt.show()
| 367 |
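# Cross-check sketch (illustrative): an unregularized, intercept-free scikit-learn
# fit should land close to the hand-rolled gradient-descent weights above, since
# the model here has no bias term and no regularization.
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(C=1e6, fit_intercept=False, max_iter=10_000).fit(x, y)
print("sklearn weights:", clf.coef_.ravel())  # compare against `theta`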
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
lowercase__ = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def _snake_case ( lowercase__ ):
_lowerCamelCase : Tuple = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def _snake_case ( lowercase__ ):
return x[0]
def _snake_case ( lowercase__ ):
_lowerCamelCase : List[Any] = get_letter_count(lowercase__ )
_lowerCamelCase : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(lowercase__ )
_lowerCamelCase : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowercase__ )
_lowerCamelCase : Optional[int] = ''.join(freq_to_letter[freq] )
_lowerCamelCase : Any = list(freq_to_letter_str.items() )
freq_pairs.sort(key=lowercase__ , reverse=lowercase__ )
_lowerCamelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(lowercase__ )
def _snake_case ( lowercase__ ):
_lowerCamelCase : str = get_frequency_order(lowercase__ )
_lowerCamelCase : Union[str, Any] = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod() | 12 | 0 |
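# Illustrative scoring with the helpers above: ordinary English text shares most of
# its six most- and least-frequent letters with ETAOIN, so scores approach 12, while
# uniform letter soup scores near 0. The scorer's public name is assumed to be
# `english_freq_match_score` (it is obfuscated to `_snake_case` above).
print(english_freq_match_score("Alan Mathison Turing was an English mathematician."))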
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowerCAmelCase__ ( a__, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = BertJapaneseTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = True
def A_ ( self ):
super().setUp()
_lowerCamelCase : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
_lowerCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def A_ ( self , lowercase ):
_lowerCamelCase : Tuple = '''こんにちは、世界。 \nこんばんは、世界。'''
_lowerCamelCase : str = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def A_ ( self , lowercase ):
_lowerCamelCase : int = self.get_input_output_texts(_lowerCamelCase )
_lowerCamelCase : List[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
_lowerCamelCase : Union[str, Any] = tokenizer.decode(_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
return text, ids
def A_ ( self ):
pass # TODO add if relevant
def A_ ( self ):
pass # TODO add if relevant
def A_ ( self ):
pass # TODO add if relevant
def A_ ( self ):
_lowerCamelCase : str = self.tokenizer_class(self.vocab_file )
_lowerCamelCase : Optional[Any] = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(_lowerCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def A_ ( self ):
_lowerCamelCase : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(_lowerCamelCase )
_lowerCamelCase : Union[str, Any] = '''こんにちは、世界。\nこんばんは、世界。'''
_lowerCamelCase : Optional[int] = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowerCamelCase : Tuple = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_lowerCamelCase , 'wb' ) as handle:
pickle.dump(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , 'rb' ) as handle:
_lowerCamelCase : Any = pickle.load(_lowerCamelCase )
_lowerCamelCase : Tuple = tokenizer_new.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def A_ ( self ):
try:
_lowerCamelCase : Any = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def A_ ( self ):
try:
_lowerCamelCase : str = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = MecabTokenizer(do_lower_case=_lowerCamelCase , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def A_ ( self ):
try:
_lowerCamelCase : List[str] = MecabTokenizer(
do_lower_case=_lowerCamelCase , normalize_text=_lowerCamelCase , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def A_ ( self ):
_lowerCamelCase : Any = MecabTokenizer(normalize_text=_lowerCamelCase , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def A_ ( self ):
_lowerCamelCase : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(_lowerCamelCase )
_lowerCamelCase : List[str] = '''こんにちは、世界。\nこんばんは、世界。'''
_lowerCamelCase : Dict = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_lowerCamelCase , 'wb' ) as handle:
pickle.dump(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , 'rb' ) as handle:
_lowerCamelCase : int = pickle.load(_lowerCamelCase )
_lowerCamelCase : Optional[int] = tokenizer_new.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
@require_sudachi
def A_ ( self ):
_lowerCamelCase : List[Any] = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def A_ ( self ):
_lowerCamelCase : Any = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def A_ ( self ):
_lowerCamelCase : Dict = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def A_ ( self ):
_lowerCamelCase : str = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def A_ ( self ):
_lowerCamelCase : Dict = SudachiTokenizer(do_lower_case=_lowerCamelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def A_ ( self ):
_lowerCamelCase : Optional[int] = SudachiTokenizer(normalize_text=_lowerCamelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def A_ ( self ):
_lowerCamelCase : Any = SudachiTokenizer(trim_whitespace=_lowerCamelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def A_ ( self ):
_lowerCamelCase : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(_lowerCamelCase )
_lowerCamelCase : Dict = '''こんにちは、世界。\nこんばんは、世界。'''
_lowerCamelCase : Any = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_lowerCamelCase , 'wb' ) as handle:
pickle.dump(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , 'rb' ) as handle:
_lowerCamelCase : Any = pickle.load(_lowerCamelCase )
_lowerCamelCase : Tuple = tokenizer_new.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
@require_jumanpp
def A_ ( self ):
_lowerCamelCase : List[str] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def A_ ( self ):
_lowerCamelCase : Dict = JumanppTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def A_ ( self ):
_lowerCamelCase : Any = JumanppTokenizer(normalize_text=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def A_ ( self ):
_lowerCamelCase : Optional[int] = JumanppTokenizer(trim_whitespace=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def A_ ( self ):
_lowerCamelCase : Optional[int] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def A_ ( self ):
_lowerCamelCase : Dict = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
_lowerCamelCase : List[Any] = {}
for i, token in enumerate(_lowerCamelCase ):
_lowerCamelCase : Any = i
_lowerCamelCase : Tuple = WordpieceTokenizer(vocab=_lowerCamelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def A_ ( self ):
_lowerCamelCase : str = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
_lowerCamelCase : List[str] = tokenizer.subword_tokenizer
_lowerCamelCase : str = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(_lowerCamelCase , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
_lowerCamelCase : int = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(_lowerCamelCase , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
_lowerCamelCase : Optional[int] = tokenizer.encode('ありがとう。' , add_special_tokens=_lowerCamelCase )
_lowerCamelCase : Optional[int] = tokenizer.encode('どういたしまして。' , add_special_tokens=_lowerCamelCase )
_lowerCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
_lowerCamelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCAmelCase__ ( a__, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = BertJapaneseTokenizer
lowerCamelCase__ = False
def A_ ( self ):
super().setUp()
_lowerCamelCase : Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def A_ ( self , **lowercase ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **_lowerCamelCase )
def A_ ( self , lowercase ):
_lowerCamelCase : int = '''こんにちは、世界。 \nこんばんは、世界。'''
_lowerCamelCase : Optional[int] = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def A_ ( self ):
pass # TODO add if relevant
def A_ ( self ):
pass # TODO add if relevant
def A_ ( self ):
pass # TODO add if relevant
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
_lowerCamelCase : List[str] = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
_lowerCamelCase , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def A_ ( self ):
_lowerCamelCase : Any = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
_lowerCamelCase : str = {}
for i, token in enumerate(_lowerCamelCase ):
_lowerCamelCase : Optional[int] = i
_lowerCamelCase : Dict = CharacterTokenizer(vocab=_lowerCamelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def A_ ( self ):
_lowerCamelCase : str = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
_lowerCamelCase : Optional[Any] = tokenizer.encode('ありがとう。' , add_special_tokens=_lowerCamelCase )
_lowerCamelCase : str = tokenizer.encode('どういたしまして。' , add_special_tokens=_lowerCamelCase )
_lowerCamelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
_lowerCamelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : str = '''cl-tohoku/bert-base-japanese'''
_lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(_lowerCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
_lowerCamelCase : Optional[Any] = '''bert-base-cased'''
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(_lowerCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) ) | 368 |
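# Hedged quick-start for the tokenizer exercised by the tests above:
from transformers import BertJapaneseTokenizer
tok = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
print(tok.tokenize("こんにちは、世界。"))  # MeCab word split followed by WordPiece subwords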
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase , lowercase ):
_lowerCamelCase : Dict = question_encoder
_lowerCamelCase : List[Any] = generator
_lowerCamelCase : Optional[Any] = self.question_encoder
def A_ ( self , lowercase ):
if os.path.isfile(lowercase ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(lowercase , exist_ok=lowercase )
_lowerCamelCase : List[Any] = os.path.join(lowercase , 'question_encoder_tokenizer' )
_lowerCamelCase : Dict = os.path.join(lowercase , 'generator_tokenizer' )
self.question_encoder.save_pretrained(lowercase )
self.generator.save_pretrained(lowercase )
@classmethod
def A_ ( cls , lowercase , **lowercase ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_lowerCamelCase : Optional[int] = kwargs.pop('config' , lowercase )
if config is None:
_lowerCamelCase : int = RagConfig.from_pretrained(lowercase )
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
lowercase , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained(
lowercase , config=config.generator , subfolder='generator_tokenizer' )
return cls(question_encoder=lowercase , generator=lowercase )
def __call__( self , *lowercase , **lowercase ):
return self.current_tokenizer(*lowercase , **lowercase )
def A_ ( self , *lowercase , **lowercase ):
return self.generator.batch_decode(*lowercase , **lowercase )
def A_ ( self , *lowercase , **lowercase ):
return self.generator.decode(*lowercase , **lowercase )
def A_ ( self ):
_lowerCamelCase : Any = self.question_encoder
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.generator
def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = "longest" , lowercase = None , lowercase = True , **lowercase , ):
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' , lowercase , )
if max_length is None:
_lowerCamelCase : Optional[Any] = self.current_tokenizer.model_max_length
_lowerCamelCase : Optional[Any] = self(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , max_length=lowercase , padding=lowercase , truncation=lowercase , **lowercase , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_lowerCamelCase : int = self.current_tokenizer.model_max_length
_lowerCamelCase : str = self(
text_target=lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase , **lowercase , )
_lowerCamelCase : int = labels['input_ids']
return model_inputs | 12 | 0 |
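# Minimal usage sketch: the wrapper above is normally exposed as `RagTokenizer`,
# bundling a DPR question-encoder tokenizer with a BART generator tokenizer.
from transformers import RagTokenizer
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
batch = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")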
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase__ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(
_lowerCAmelCase , split=_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase , streaming=_lowerCAmelCase , num_proc=_lowerCAmelCase , **_lowerCAmelCase , )
_lowerCamelCase : List[Any] = field
_lowerCamelCase : Union[str, Any] = path_or_paths if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else {self.split: path_or_paths}
_lowerCamelCase : List[str] = Json(
cache_dir=_lowerCAmelCase , data_files=_lowerCAmelCase , features=_lowerCAmelCase , field=_lowerCAmelCase , **_lowerCAmelCase , )
def A_ ( self ):
# Build iterable dataset
if self.streaming:
_lowerCamelCase : Optional[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_lowerCamelCase : Union[str, Any] = None
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : List[Any] = None
_lowerCamelCase : List[Any] = None
self.builder.download_and_prepare(
download_config=_lowerCAmelCase , download_mode=_lowerCAmelCase , verification_mode=_lowerCAmelCase , base_path=_lowerCAmelCase , num_proc=self.num_proc , )
_lowerCamelCase : Optional[Any] = self.builder.as_dataset(
split=self.split , verification_mode=_lowerCAmelCase , in_memory=self.keep_in_memory )
return dataset
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase = None , lowercase = None , **lowercase , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
_lowerCamelCase : List[str] = dataset
_lowerCamelCase : Optional[Any] = path_or_buf
_lowerCamelCase : Tuple = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_lowerCamelCase : Union[str, Any] = num_proc
_lowerCamelCase : Any = """utf-8"""
_lowerCamelCase : List[Any] = to_json_kwargs
def A_ ( self ):
_lowerCamelCase : int = self.to_json_kwargs.pop('path_or_buf' , _lowerCAmelCase )
_lowerCamelCase : Tuple = self.to_json_kwargs.pop('orient' , 'records' )
_lowerCamelCase : List[Any] = self.to_json_kwargs.pop('lines' , True if orient == 'records' else False )
_lowerCamelCase : List[str] = self.to_json_kwargs.pop('index' , False if orient in ['split', 'table'] else True )
_lowerCamelCase : Dict = self.to_json_kwargs.pop('compression' , _lowerCAmelCase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , 'wb' , compression=_lowerCAmelCase ) as buffer:
_lowerCamelCase : str = self._write(file_obj=_lowerCAmelCase , orient=_lowerCAmelCase , lines=_lowerCAmelCase , index=_lowerCAmelCase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
' was passed. Please provide a local path instead.' )
_lowerCamelCase : Union[str, Any] = self._write(
file_obj=self.path_or_buf , orient=_lowerCAmelCase , lines=_lowerCAmelCase , index=_lowerCAmelCase , **self.to_json_kwargs )
return written
def A_ ( self , lowercase ):
_lowerCamelCase : List[str] = args
_lowerCamelCase : Tuple = query_table(
table=self.dataset.data , key=slice(_lowerCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
_lowerCamelCase : Optional[int] = batch.to_pandas().to_json(
path_or_buf=_lowerCAmelCase , orient=_lowerCAmelCase , lines=_lowerCAmelCase , index=_lowerCAmelCase , **_lowerCAmelCase )
if not json_str.endswith('\n' ):
json_str += "\n"
return json_str.encode(self.encoding )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , **lowercase , ):
_lowerCamelCase : List[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
_lowerCamelCase : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_lowerCAmelCase )
else:
_lowerCamelCase : str = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _lowerCAmelCase , _lowerCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
written += file_obj.write(_lowerCAmelCase )
return written | 369 |
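# Illustrative round trip through the reader/writer pair above, as surfaced by the
# public `datasets` API:
from datasets import Dataset, load_dataset
Dataset.from_dict({"a": [1, 2, 3]}).to_json("data.jsonl")   # JsonDatasetWriter
ds = load_dataset("json", data_files="data.jsonl")          # JsonDatasetReader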
"""simple docstring"""
def _snake_case ( lowercase__ = 10 ):
if not isinstance(lowercase__ , lowercase__ ) or n < 0:
raise ValueError('Invalid input' )
_lowerCamelCase : str = 10**n
_lowerCamelCase : Union[str, Any] = 28433 * (pow(2 , 7830457 , lowercase__ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(10) = }") | 12 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowerCAmelCase__ ( _UpperCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = """WhisperFeatureExtractor"""
lowerCamelCase__ = """WhisperTokenizer"""
def __init__( self , lowercase , lowercase ):
super().__init__(lowercase_ , lowercase_ )
_lowerCamelCase : Any = self.feature_extractor
_lowerCamelCase : Tuple = False
def A_ ( self , lowercase=None , lowercase=None , lowercase=True ):
return self.tokenizer.get_decoder_prompt_ids(task=lowercase_ , language=lowercase_ , no_timestamps=lowercase_ )
def __call__( self , *lowercase , **lowercase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowercase_ , **lowercase_ )
_lowerCamelCase : str = kwargs.pop('audio' , lowercase_ )
_lowerCamelCase : Tuple = kwargs.pop('sampling_rate' , lowercase_ )
_lowerCamelCase : Dict = kwargs.pop('text' , lowercase_ )
if len(lowercase_ ) > 0:
_lowerCamelCase : Tuple = args[0]
_lowerCamelCase : Any = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
_lowerCamelCase : List[Any] = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_ )
if text is not None:
_lowerCamelCase : Any = self.tokenizer(lowercase_ , **lowercase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowerCamelCase : List[Any] = encodings["""input_ids"""]
return inputs
def A_ ( self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def A_ ( self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
def A_ ( self , lowercase , lowercase="np" ):
return self.tokenizer.get_prompt_ids(lowercase_ , return_tensors=lowercase_ ) | 370 |
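# Hedged usage sketch for the processor above (publicly `WhisperProcessor`), fed
# one second of silent 16 kHz audio as a stand-in for real speech:
import numpy as np
from transformers import WhisperProcessor
processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
waveform = np.zeros(16_000, dtype=np.float32)
inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")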
"""simple docstring"""
import argparse
import datetime
def _snake_case ( lowercase__ ):
_lowerCamelCase : Dict = {
'0': 'Sunday',
'1': 'Monday',
'2': 'Tuesday',
'3': 'Wednesday',
'4': 'Thursday',
'5': 'Friday',
'6': 'Saturday',
}
_lowerCamelCase : str = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(lowercase__ ) < 11:
raise ValueError('Must be 10 characters long' )
# Get month
_lowerCamelCase : int = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError('Month must be between 1 - 12' )
_lowerCamelCase : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get day
_lowerCamelCase : int = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError('Date must be between 1 - 31' )
# Get second separator
_lowerCamelCase : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get year
_lowerCamelCase : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
'Year out of range. There has to be some sort of limit...right?' )
# Get datetime obj for validation
    _lowerCamelCase : str = datetime.date(int(y ) , int(m ) , int(d ) )
# Start math
if m <= 2:
_lowerCamelCase : str = y - 1
_lowerCamelCase : Tuple = m + 12
# maths var
    _lowerCamelCase : int = int(str(y )[:2] )
    _lowerCamelCase : int = int(str(y )[2:] )
_lowerCamelCase : int = int(2.6 * m - 5.3_9 )
_lowerCamelCase : int = int(c / 4 )
_lowerCamelCase : int = int(k / 4 )
_lowerCamelCase : int = int(d + k )
_lowerCamelCase : int = int(t + u + v + x )
_lowerCamelCase : int = int(z - (2 * c) )
_lowerCamelCase : int = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
# Response
    _lowerCamelCase : str = f'''Your date {date_input}, is a {days[str(f )]}!'''
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
lowercase__ = parser.parse_args()
zeller(args.date_input) | 12 | 0 |
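# Worked example: 31 January 2010 fell on a Sunday, so
assert zeller("01-31-2010") == "Your date 01-31-2010, is a Sunday!"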
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def _snake_case ( lowercase__ , lowercase__=1 ):
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def _snake_case ( lowercase__ , lowercase__=0 ):
_lowerCamelCase : str = []
for old_item in old_list:
_lowerCamelCase : Any = old_item.replace('in_layers.0' , 'norm1' )
_lowerCamelCase : Tuple = new_item.replace('in_layers.2' , 'conv1' )
_lowerCamelCase : Optional[int] = new_item.replace('out_layers.0' , 'norm2' )
_lowerCamelCase : Union[str, Any] = new_item.replace('out_layers.3' , 'conv2' )
_lowerCamelCase : Tuple = new_item.replace('emb_layers.1' , 'time_emb_proj' )
_lowerCamelCase : List[str] = new_item.replace('skip_connection' , 'conv_shortcut' )
        _lowerCamelCase : List[str] = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def _snake_case ( lowercase__ , lowercase__=0 ):
_lowerCamelCase : Union[str, Any] = []
for old_item in old_list:
_lowerCamelCase : Union[str, Any] = old_item
_lowerCamelCase : str = new_item.replace('norm.weight' , 'group_norm.weight' )
_lowerCamelCase : Dict = new_item.replace('norm.bias' , 'group_norm.bias' )
_lowerCamelCase : List[Any] = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
_lowerCamelCase : Dict = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
        _lowerCamelCase : Optional[int] = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=None ):
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_lowerCamelCase : List[Any] = old_checkpoint[path]
_lowerCamelCase : Optional[int] = old_tensor.shape[0] // 3
_lowerCamelCase : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_lowerCamelCase : Dict = old_tensor.shape[0] // config['num_head_channels'] // 3
_lowerCamelCase : Optional[int] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = old_tensor.split(channels // num_heads , dim=1 )
            _lowerCamelCase : Tuple = query.reshape(target_shape )
            _lowerCamelCase : Any = key.reshape(target_shape )
            _lowerCamelCase : int = value.reshape(target_shape )
for path in paths:
_lowerCamelCase : Optional[Any] = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_lowerCamelCase : Dict = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
_lowerCamelCase : List[str] = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
_lowerCamelCase : List[str] = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
_lowerCamelCase : Optional[Any] = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_lowerCamelCase : Union[str, Any] = old_checkpoint[path['old']][:, :, 0]
else:
_lowerCamelCase : Optional[Any] = old_checkpoint[path['old']]
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : Dict = {}
_lowerCamelCase : Union[str, Any] = checkpoint['time_embed.0.weight']
_lowerCamelCase : Union[str, Any] = checkpoint['time_embed.0.bias']
_lowerCamelCase : Optional[Any] = checkpoint['time_embed.2.weight']
_lowerCamelCase : str = checkpoint['time_embed.2.bias']
_lowerCamelCase : Union[str, Any] = checkpoint['input_blocks.0.0.weight']
_lowerCamelCase : Dict = checkpoint['input_blocks.0.0.bias']
_lowerCamelCase : List[Any] = checkpoint['out.0.weight']
_lowerCamelCase : Any = checkpoint['out.0.bias']
_lowerCamelCase : str = checkpoint['out.2.weight']
_lowerCamelCase : Union[str, Any] = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
_lowerCamelCase : Tuple = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
_lowerCamelCase : Optional[int] = {
layer_id: [key for key in checkpoint if f'''input_blocks.{layer_id}''' in key]
for layer_id in range(__a )
}
# Retrieves the keys for the middle blocks only
_lowerCamelCase : Dict = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
_lowerCamelCase : List[Any] = {
layer_id: [key for key in checkpoint if f'''middle_block.{layer_id}''' in key]
for layer_id in range(__a )
}
# Retrieves the keys for the output blocks only
_lowerCamelCase : Optional[Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
_lowerCamelCase : List[Any] = {
layer_id: [key for key in checkpoint if f'''output_blocks.{layer_id}''' in key]
for layer_id in range(__a )
}
for i in range(1 , __a ):
_lowerCamelCase : List[Any] = (i - 1) // (config['num_res_blocks'] + 1)
_lowerCamelCase : Tuple = (i - 1) % (config['num_res_blocks'] + 1)
_lowerCamelCase : Any = [key for key in input_blocks[i] if f'''input_blocks.{i}.0''' in key]
_lowerCamelCase : Optional[int] = [key for key in input_blocks[i] if f'''input_blocks.{i}.1''' in key]
if f'''input_blocks.{i}.0.op.weight''' in checkpoint:
_lowerCamelCase : List[str] = checkpoint[
f'''input_blocks.{i}.0.op.weight'''
]
_lowerCamelCase : Optional[int] = checkpoint[
f'''input_blocks.{i}.0.op.bias'''
]
continue
_lowerCamelCase : Tuple = renew_resnet_paths(__a )
_lowerCamelCase : int = {'old': f'''input_blocks.{i}.0''', 'new': f'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
_lowerCamelCase : str = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
__a , __a , __a , additional_replacements=[meta_path, resnet_op] , config=__a )
if len(__a ):
_lowerCamelCase : List[Any] = renew_attention_paths(__a )
_lowerCamelCase : Union[str, Any] = {
'old': f'''input_blocks.{i}.1''',
'new': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
_lowerCamelCase : Union[str, Any] = {
f'''input_blocks.{i}.1.qkv.bias''': {
'key': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'query': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'value': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
f'''input_blocks.{i}.1.qkv.weight''': {
'key': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'query': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'value': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__a , __a , __a , additional_replacements=[meta_path] , attention_paths_to_split=__a , config=__a , )
_lowerCamelCase : Optional[int] = middle_blocks[0]
_lowerCamelCase : Union[str, Any] = middle_blocks[1]
_lowerCamelCase : Optional[int] = middle_blocks[2]
_lowerCamelCase : List[Any] = renew_resnet_paths(__a )
assign_to_checkpoint(__a , __a , __a , config=__a )
_lowerCamelCase : Optional[Any] = renew_resnet_paths(__a )
assign_to_checkpoint(__a , __a , __a , config=__a )
_lowerCamelCase : List[str] = renew_attention_paths(__a )
_lowerCamelCase : Optional[int] = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
__a , __a , __a , attention_paths_to_split=__a , config=__a )
for i in range(__a ):
_lowerCamelCase : Tuple = i // (config['num_res_blocks'] + 1)
_lowerCamelCase : Optional[int] = i % (config['num_res_blocks'] + 1)
_lowerCamelCase : int = [shave_segments(__a , 2 ) for name in output_blocks[i]]
_lowerCamelCase : Optional[Any] = {}
for layer in output_block_layers:
_lowerCamelCase, _lowerCamelCase : Optional[Any] = layer.split('.' )[0], shave_segments(__a , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__a )
else:
_lowerCamelCase : Optional[Any] = [layer_name]
if len(__a ) > 1:
_lowerCamelCase : Optional[int] = [key for key in output_blocks[i] if f'''output_blocks.{i}.0''' in key]
_lowerCamelCase : str = [key for key in output_blocks[i] if f'''output_blocks.{i}.1''' in key]
_lowerCamelCase : Tuple = renew_resnet_paths(__a )
_lowerCamelCase : Any = renew_resnet_paths(__a )
_lowerCamelCase : Any = {'old': f'''output_blocks.{i}.0''', 'new': f'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_lowerCamelCase : str = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
_lowerCamelCase : Any = checkpoint[
f'''output_blocks.{i}.{index}.conv.weight'''
]
_lowerCamelCase : List[Any] = checkpoint[
f'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(__a ) == 2:
_lowerCamelCase : Union[str, Any] = []
if len(__a ):
_lowerCamelCase : Tuple = renew_attention_paths(__a )
_lowerCamelCase : Optional[Any] = {
'old': f'''output_blocks.{i}.1''',
'new': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
_lowerCamelCase : Any = {
f'''output_blocks.{i}.1.qkv.bias''': {
'key': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'query': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'value': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
f'''output_blocks.{i}.1.qkv.weight''': {
'key': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'query': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'value': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__a , __a , __a , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=__a , )
else:
_lowerCamelCase : Any = renew_resnet_paths(__a , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_lowerCamelCase : Any = '.'.join(['output_blocks', str(__a ), path['old']] )
_lowerCamelCase : List[str] = '.'.join(['up_blocks', str(__a ), 'resnets', str(__a ), path['new']] )
_lowerCamelCase : Optional[int] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path) | 371 |
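
# Editor's sketch: the fused-QKV split performed by assign_to_checkpoint above, shown
# in isolation with torch. The head count and widths (num_heads=2, head_dim=4) are
# invented for illustration; only the reshape/split logic mirrors the converter.
import torch


def _demo_split_qkv():
    num_heads, head_dim = 2, 4
    channels = num_heads * head_dim  # width of each of Q, K, V
    fused = torch.randn(3 * channels, channels, 1)  # conv1d-style fused projection
    grouped = fused.reshape((num_heads, 3 * channels // num_heads) + fused.shape[1:])
    query, key, value = grouped.split(channels // num_heads, dim=1)
    # Each projection flattens back to (channels, in_dim) for the diffusers layout.
    assert query.reshape((-1, channels)).shape == (channels, channels)
    assert key.shape == value.shape == query.shape


_demo_split_qkv()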
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    # Optional "+91"/"91"/leading-zero prefix, then a 10-digit number starting with 7, 8 or 9.
    pat = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895""")) | 12 | 0 |
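
# Editor's sketch: a few illustrative calls for indian_phone_validator. The sample
# numbers are made up and only exercise the prefix/leading-digit rules of the regex.
assert indian_phone_validator("+918827897895")
assert indian_phone_validator("9876543210")      # bare 10-digit number starting with 9
assert not indian_phone_validator("1234567890")  # first digit must be 7, 8 or 9
assert not indian_phone_validator("+91 98765")   # too short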
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
lowercase__ = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/accelerate' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main() | 350 |
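
# Editor's sketch: the closing rule above factored into a pure function so it can be
# tested without the GitHub API. The helper name is my own; the thresholds (7 days
# since the bot's last comment, 30 days since creation) mirror the script.
def _should_close(days_since_updated: int, days_since_creation: int, bot_commented_last: bool) -> bool:
    return bot_commented_last and days_since_updated > 7 and days_since_creation >= 30


assert _should_close(10, 45, bot_commented_last=True)
assert not _should_close(10, 45, bot_commented_last=False)
assert not _should_close(3, 45, bot_commented_last=True)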
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowercase__ = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
lowercase__ = """zero2"""
lowercase__ = """zero3"""
lowercase__ = [ZEROa, ZEROa]
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
lowercase__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
def A_ ( self , lowercase ):
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = True , lowercase = True , lowercase = True , ):
_lowerCamelCase : List[str] = models[model]
_lowerCamelCase : Optional[int] = self.run_trainer(
stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
self.do_checks(lowercase )
return output_dir
def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = 1 , lowercase = True , lowercase = True , ):
_lowerCamelCase : List[str] = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
_lowerCamelCase : Any = F'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_lowerCamelCase : Optional[int] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_lowerCamelCase : Optional[Any] = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_lowerCamelCase : Dict = self.get_launcher(lowercase )
_lowerCamelCase : Union[str, Any] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase , env=self.get_env() )
return output_dir
def A_ ( self , lowercase=False ):
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
_lowerCamelCase : Any = min(2 , get_gpu_count() ) if distributed else 1
return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split() | 12 | 0 |
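
# Editor's sketch: how `parameterized` builds sub-test names through a custom
# name_func like the one above. The sample test name and params are invented;
# only the name-composition logic is being demonstrated.
from types import SimpleNamespace

from parameterized import parameterized as _parameterized


def _demo_name_func(func, param_num, param):
    param_based_name = _parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


def _sample_test():
    pass


assert _demo_name_func(_sample_test, 0, SimpleNamespace(args=("zero2", "base"))) == "_sample_test_zero2_base"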
"""simple docstring"""
def bubble_sort(list_data: list , length: int = 0 ) -> list:
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            # Swap adjacent out-of-order elements.
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # Recurse on the shrinking unsorted prefix until a pass makes no swaps.
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 351 |
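
# Editor's sketch: quick checks for the recursive bubble sort above; inputs are
# arbitrary examples.
assert bubble_sort([0, 5, 2, 3, 2]) == [0, 2, 2, 3, 5]
assert bubble_sort([]) == []
assert bubble_sort([-2, -45, -5]) == [-45, -5, -2]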
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = ["""pixel_values"""]
def __init__( self , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = 8 , **lowercase , ):
super().__init__(**lowercase )
_lowerCamelCase : Optional[Any] = do_rescale
_lowerCamelCase : Union[str, Any] = rescale_factor
_lowerCamelCase : Any = do_pad
_lowerCamelCase : Optional[int] = pad_size
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase = None ):
_lowerCamelCase, _lowerCamelCase : Tuple = get_image_size(lowercase )
_lowerCamelCase : Union[str, Any] = (old_height // size + 1) * size - old_height
_lowerCamelCase : Tuple = (old_width // size + 1) * size - old_width
return pad(lowercase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=lowercase )
def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
_lowerCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Any = do_pad if do_pad is not None else self.do_pad
_lowerCamelCase : int = pad_size if pad_size is not None else self.pad_size
_lowerCamelCase : Dict = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
_lowerCamelCase : Dict = [to_numpy_array(lowercase ) for image in images]
if do_rescale:
_lowerCamelCase : str = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_pad:
_lowerCamelCase : str = [self.pad(lowercase , size=lowercase ) for image in images]
_lowerCamelCase : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
_lowerCamelCase : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 12 | 0 |
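
# Editor's sketch: the pad-size arithmetic used by the image processor above. For a
# dimension of length `old` and window `size`, (old // size + 1) * size - old pads up
# to the next multiple of `size`; note that an exact multiple still gains one full
# extra window with this formula. Values are illustrative.
def _pad_amount(old: int, size: int) -> int:
    return (old // size + 1) * size - old


assert _pad_amount(21, 8) == 3   # 21 -> 24
assert _pad_amount(17, 8) == 7   # 17 -> 24
assert _pad_amount(16, 8) == 8   # already a multiple: formula still adds a window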
import requests
lowercase__ = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def _snake_case ( lowercase__ ):
_lowerCamelCase : Dict = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(f'''{i}.) {article['title']}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""") | 352 |
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["""arrow_begin"""] = KEYMAP["""up"""]
KEYMAP["""arrow_end"""] = KEYMAP["""left"""]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    if os.name == "nt":
        import msvcrt
        encoding = 'mbcs'
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2] )
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP['esc'] )
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character():
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"] | 12 | 0 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowercase__ = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
lowercase__ = 10
lowercase__ = 256
def _snake_case ( lowercase__ ):
"""simple docstring"""
if len(lowercase__ ) < MIN_NUM_TOKENS:
return None
_lowerCamelCase : List[Any] = MinHash(num_perm=lowercase__ )
for token in set(lowercase__ ):
min_hash.update(token.encode() )
return min_hash
def _snake_case ( lowercase__ ):
"""simple docstring"""
return {t for t in NON_ALPHA.split(lowercase__ ) if len(t.strip() ) > 0}
class lowerCAmelCase__ :
'''simple docstring'''
    def __init__( self , * , duplication_jaccard_threshold: float = 0.85 ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self , code_key , min_hash ):
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F'''Duplicate key {code_key}''' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ):
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self , filepath ):
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , 'w' ) as f:
            json.dump(duplicate_clusters , f )
def _snake_case ( lowercase__ ):
"""simple docstring"""
_lowerCamelCase, _lowerCamelCase : Dict = element
_lowerCamelCase : Optional[int] = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _snake_case ( lowercase__ ):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(lowercase__ , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def _snake_case ( lowercase__ , lowercase__ ):
"""simple docstring"""
_lowerCamelCase : List[str] = DuplicationIndex(duplication_jaccard_threshold=lowercase__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowercase__ ) ) , max_queue_size=100 ) ):
di.add(lowercase__ , lowercase__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def jaccard_similarity(code1: str , code2: str ) -> float:
    """Compute the Jaccard similarity of the token sets of two code snippets."""
    tokens1 = get_tokens(code1 )
    tokens2 = get_tokens(code2 )
    return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
lowercase__ = None
def _snake_case ( lowercase__ , lowercase__ ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = []
for elementa in cluster:
_lowerCamelCase : List[str] = _shared_dataset[elementa['base_index']]['content']
for elementa in extremes:
_lowerCamelCase : Union[str, Any] = _shared_dataset[elementa['base_index']]['content']
if jaccard_similarity(lowercase__ , lowercase__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_lowerCamelCase : Tuple = 1
extremes.append(lowercase__ )
return extremes
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
global _shared_dataset
_lowerCamelCase : int = dataset
_lowerCamelCase : Tuple = []
_lowerCamelCase : Dict = partial(_find_cluster_extremes_shared , jaccard_threshold=lowercase__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
lowercase__ , lowercase__ , ) , total=len(lowercase__ ) , ):
extremes_list.append(lowercase__ )
return extremes_list
def _snake_case ( lowercase__ , lowercase__ = 0.8_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = make_duplicate_clusters(lowercase__ , lowercase__ )
_lowerCamelCase : Dict = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Union[str, Any] = find_extremes(lowercase__ , lowercase__ , lowercase__ )
for extremes in extremes_clusters:
for element in extremes:
_lowerCamelCase : Optional[Any] = element
_lowerCamelCase : int = duplicate_indices - set(extreme_dict.keys() )
_lowerCamelCase : Union[str, Any] = dataset.filter(lambda lowercase__ , lowercase__ : idx not in remove_indices , with_indices=lowercase__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_lowerCamelCase : Union[str, Any] = element['base_index'] in extreme_dict
if element["is_extreme"]:
_lowerCamelCase : str = extreme_dict[element['base_index']]['copies']
print(f'''Original dataset size: {len(lowercase__ )}''' )
print(f'''Number of duplicate clusters: {len(lowercase__ )}''' )
print(f'''Files in duplicate cluster: {len(lowercase__ )}''' )
print(f'''Unique files in duplicate cluster: {len(lowercase__ )}''' )
print(f'''Filtered dataset size: {len(lowercase__ )}''' )
return ds_filter, duplicate_clusters | 353 |
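
# Editor's sketch: MinHash estimating the exact token-set Jaccard similarity computed
# by jaccard_similarity above. The two code strings are invented; MinHash is
# probabilistic, hence the loose tolerance rather than exact agreement.
_a = "def add(first, second): total = first + second; print(total); return total"
_b = "def add(first, other): total = first + other; print(total); return total"
exact = jaccard_similarity(_a, _b)  # 6 shared tokens / 8 total -> 0.75
m_a = get_min_hash([t for t in NON_ALPHA.split(_a) if t.strip()])
m_b = get_min_hash([t for t in NON_ALPHA.split(_b) if t.strip()])
if m_a is not None and m_b is not None:  # None when a snippet has < MIN_NUM_TOKENS tokens
    assert abs(exact - m_a.jaccard(m_b)) < 0.25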
"""simple docstring"""
from typing import Any
def mode(input_list: list ) -> list:
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod() | 12 | 0 |
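
# Editor's sketch: illustrative calls for mode() above; ties return every mode, sorted.
assert mode([2, 3, 4, 5, 3, 4]) == [3, 4]
assert mode([1, 1, 2]) == [1]
assert mode([]) == []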
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowercase__ = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path , bert_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    config = XLNetConfig.from_json_file(bert_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
# Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(f'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f'''Save configuration file to {os.path.abspath(pytorch_config_dump_path )}''' )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
lowercase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
) | 354 |
"""simple docstring"""
def counting_sort(collection: list ) -> list:
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string: str ) -> str:
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
lowercase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase__ = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted)) | 12 | 0 |
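
# Editor's sketch: counting sort also handles negatives, since every value is offset
# by the collection minimum before indexing the counting array.
assert counting_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert counting_sort([-2, -5, -45]) == [-45, -5, -2]
assert counting_sort_string("cba") == "abc"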
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 355 |
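
# Editor's sketch: the idea behind _LazyModule, reduced to a module-level __getattr__
# (PEP 562). This is a conceptual stand-in only; transformers' real implementation
# also patches sys.modules, supports submodules, and implements __dir__.
import importlib

_LAZY_ATTRS = {"MLukeTokenizer": ".tokenization_mluke"}  # public name -> defining submodule


def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)  # imported only on first attribute access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")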
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 12 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 356 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = (UnCLIPScheduler,)
def A_ ( self , **lowercase ):
_lowerCamelCase : Any = {
'num_train_timesteps': 1000,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**lowercase )
return config
def A_ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase )
def A_ ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowercase )
def A_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase )
def A_ ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowercase )
def A_ ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowercase )
def A_ ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowercase , prev_timestep=lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' )
_lowerCamelCase : str = scheduler_class(**lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5
def A_ ( self ):
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' )
_lowerCamelCase : int = scheduler_class(**lowercase )
_lowerCamelCase : List[str] = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5
def A_ ( self ):
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config()
_lowerCamelCase : Tuple = scheduler_class(**lowercase )
_lowerCamelCase : Union[str, Any] = scheduler.timesteps
_lowerCamelCase : Any = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
_lowerCamelCase : Optional[int] = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : Tuple = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : Optional[int] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def A_ ( self ):
_lowerCamelCase : Tuple = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Optional[Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(25 )
_lowerCamelCase : Optional[Any] = scheduler.timesteps
_lowerCamelCase : Optional[int] = self.dummy_model()
_lowerCamelCase : Any = self.dummy_sample_deter
_lowerCamelCase : str = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : List[Any] = model(lowercase , lowercase )
if i + 1 == timesteps.shape[0]:
_lowerCamelCase : Optional[int] = None
else:
_lowerCamelCase : List[str] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Union[str, Any] = scheduler.step(
lowercase , lowercase , lowercase , prev_timestep=lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : List[Any] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def A_ ( self ):
pass
def A_ ( self ):
pass | 12 | 0 |
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator , dataset , train_idxs , valid_idxs , batch_size = 16 ):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = DatasetDict(
        {
            'train': dataset['train'].select(train_idxs ),
            'validation': dataset['train'].select(valid_idxs ),
            'test': dataset['validation'],
        } )
def tokenize_function(lowercase__ ):
# max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_lowerCamelCase : Optional[int] = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCamelCase : str = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowercase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_lowerCamelCase : List[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowerCamelCase : int = 16
elif accelerator.mixed_precision != "no":
_lowerCamelCase : int = 8
else:
_lowerCamelCase : int = None
return tokenizer.pad(
lowercase__ , padding='longest' , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors='pt' , )
# Instantiate dataloaders.
_lowerCamelCase : str = DataLoader(
tokenized_datasets['train'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
_lowerCamelCase : Union[str, Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
_lowerCamelCase : Optional[Any] = DataLoader(
tokenized_datasets['test'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader, test_dataloader
def training_function(config , args ):
    test_predictions = []
# Download the dataset
_lowerCamelCase : List[str] = load_dataset('glue' , 'mrpc' )
# Create our splits
_lowerCamelCase : Union[str, Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_lowerCamelCase : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCamelCase : Optional[int] = config['lr']
_lowerCamelCase : int = int(config['num_epochs'] )
_lowerCamelCase : Optional[Any] = int(config['seed'] )
_lowerCamelCase : Optional[int] = int(config['batch_size'] )
_lowerCamelCase : Dict = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
_lowerCamelCase : Optional[int] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_lowerCamelCase : int = batch_size // MAX_GPU_BATCH_SIZE
_lowerCamelCase : Optional[Any] = MAX_GPU_BATCH_SIZE
set_seed(lowercase__ )
# New Code #
# Create our folds:
    folds = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = get_fold_dataloaders(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCamelCase : str = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCamelCase : Dict = model.to(accelerator.device )
# Instantiate optimizer
_lowerCamelCase : List[str] = AdamW(params=model.parameters() , lr=lowercase__ )
# Instantiate scheduler
_lowerCamelCase : Dict = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=100 , num_training_steps=(len(lowercase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_lowerCamelCase : Optional[Any] = model(**lowercase__ )
_lowerCamelCase : Any = outputs.loss
_lowerCamelCase : Union[str, Any] = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCamelCase : Any = model(**lowercase__ )
_lowerCamelCase : int = outputs.logits.argmax(dim=-1 )
_lowerCamelCase, _lowerCamelCase : Dict = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
_lowerCamelCase : Union[str, Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , lowercase__ )
# New Code #
# We also run predictions on the test set at the very end
_lowerCamelCase : Dict = []
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCamelCase : str = model(**lowercase__ )
_lowerCamelCase : str = outputs.logits
_lowerCamelCase, _lowerCamelCase : Dict = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(lowercase__ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_lowerCamelCase : Tuple = torch.cat(lowercase__ , dim=0 )
_lowerCamelCase : int = torch.stack(lowercase__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_lowerCamelCase : Union[str, Any] = metric.compute(predictions=lowercase__ , references=lowercase__ )
accelerator.print('Average test metrics from all folds:' , lowercase__ )
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=lowercase__ , default=lowercase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds' , type=lowercase__ , default=3 , help='The number of splits to perform across the dataset' )
_lowerCamelCase : int = parser.parse_args()
_lowerCamelCase : Tuple = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main() | 357 |
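
# Editor's sketch: the fold-ensembling step above in isolation. Per-fold logits are
# stacked, averaged over folds, then argmaxed. The tensors are toy values, not model
# outputs.
import torch

_fold_logits = [
    torch.tensor([[2.0, 0.0], [0.0, 1.0]]),  # fold 1
    torch.tensor([[1.0, 0.0], [1.0, 0.0]]),  # fold 2
    torch.tensor([[3.0, 0.0], [0.0, 2.0]]),  # fold 3
]
_preds = torch.stack(_fold_logits, dim=0).sum(dim=0).div(3).argmax(dim=-1)
assert _preds.tolist() == [0, 1]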
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """data2vec-audio"""
def __init__( self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=16 , lowercase=19 , lowercase=5 , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=256 , lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
_lowerCamelCase : str = hidden_size
_lowerCamelCase : str = feat_extract_activation
_lowerCamelCase : Optional[Any] = list(lowercase )
_lowerCamelCase : Dict = list(lowercase )
_lowerCamelCase : Dict = list(lowercase )
_lowerCamelCase : Optional[Any] = conv_bias
_lowerCamelCase : Union[str, Any] = num_conv_pos_embeddings
_lowerCamelCase : List[Any] = num_conv_pos_embedding_groups
_lowerCamelCase : List[Any] = conv_pos_kernel_size
_lowerCamelCase : Optional[int] = len(self.conv_dim )
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Any = hidden_dropout
_lowerCamelCase : Union[str, Any] = attention_dropout
_lowerCamelCase : str = activation_dropout
_lowerCamelCase : Any = feat_proj_dropout
_lowerCamelCase : Tuple = final_dropout
_lowerCamelCase : Union[str, Any] = layerdrop
_lowerCamelCase : List[Any] = layer_norm_eps
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : Tuple = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Optional[Any] = mask_time_prob
_lowerCamelCase : List[Any] = mask_time_length
_lowerCamelCase : List[Any] = mask_time_min_masks
_lowerCamelCase : Tuple = mask_feature_prob
_lowerCamelCase : Optional[Any] = mask_feature_length
_lowerCamelCase : Dict = mask_feature_min_masks
# ctc loss
_lowerCamelCase : Tuple = ctc_loss_reduction
_lowerCamelCase : str = ctc_zero_infinity
# adapter
_lowerCamelCase : Union[str, Any] = add_adapter
_lowerCamelCase : List[Any] = adapter_kernel_size
_lowerCamelCase : Optional[Any] = adapter_stride
_lowerCamelCase : List[Any] = num_adapter_layers
_lowerCamelCase : int = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCamelCase : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCamelCase : List[str] = list(lowercase )
_lowerCamelCase : Optional[Any] = list(lowercase )
_lowerCamelCase : Any = list(lowercase )
_lowerCamelCase : Optional[Any] = xvector_output_dim
@property
def A_ ( self ):
return math.prod(self.conv_stride ) | 12 | 0 |
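# Hedged usage sketch for the configuration class above. The placeholder names
# make this dump non-importable as-is, so the sketch assumes the public
# transformers classes the snippet appears to be derived from
# (Data2VecAudioConfig / Data2VecAudioModel); treat those names as assumptions.
from transformers import Data2VecAudioConfig, Data2VecAudioModel

config = Data2VecAudioConfig(hidden_size=768, num_hidden_layers=12)
model = Data2VecAudioModel(config)  # randomly initialised, no download needed
# num_feat_extract_layers is derived from len(conv_dim): 7 with the defaults
print(config.num_feat_extract_layers)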
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase__ :
'''simple docstring'''
@staticmethod
def A_ ( *lowercase , **lowercase ):
pass
@is_pipeline_test
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def A_ ( self ):
_lowerCamelCase : Optional[Any] = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCamelCase : List[Any] = image_classifier(a_ , candidate_labels=['a', 'b', 'c'] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(a_ ) , [
[{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}],
[{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'c'}, {'score': 0.3_33, 'label': 'b'}],
] , )
_lowerCamelCase : Optional[int] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(a_ ) , [
[
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
],
[
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
],
[
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
],
[
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
],
[
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
],
] , )
@require_tf
def A_ ( self ):
_lowerCamelCase : str = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' )
_lowerCamelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCamelCase : Union[str, Any] = image_classifier(a_ , candidate_labels=['a', 'b', 'c'] )
self.assertEqual(
nested_simplify(a_ ) , [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}] , )
_lowerCamelCase : int = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(a_ ) , [
[
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
],
[
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
],
[
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
],
[
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
],
[
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
{'score': 0.3_33, 'label': ANY(a_ )},
],
] , )
@slow
@require_torch
def A_ ( self ):
_lowerCamelCase : List[str] = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_lowerCamelCase : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCamelCase : Union[str, Any] = image_classifier(a_ , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(a_ ) , [
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
] , )
_lowerCamelCase : Union[str, Any] = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(a_ ) , [
[
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def A_ ( self ):
_lowerCamelCase : Optional[int] = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' )
# This is an image of 2 cats with remotes and no planes
_lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCamelCase : Optional[int] = image_classifier(a_ , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(a_ ) , [
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
] , )
_lowerCamelCase : List[Any] = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(a_ ) , [
[
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
],
]
* 5 , )
| 358 |
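# A plain, non-test sketch of the pipeline exercised above. The checkpoint is
# the public CLIP model the slow tests load; "cats.png" is a hypothetical
# local file.
from PIL import Image
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
image = Image.open("cats.png")
preds = classifier(image, candidate_labels=["cat", "plane", "remote"])
print(preds)  # list of {"score": ..., "label": ...} dicts sorted by score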
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class lowerCAmelCase__ ( PipelineTool ):
'''simple docstring'''
lowerCamelCase__ = """facebook/nllb-200-distilled-600M"""
lowerCamelCase__ = (
"""This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
"""be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
"""which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """
"""plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
)
lowerCamelCase__ = """translator"""
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSeqaSeqLM
lowerCamelCase__ = LANGUAGE_CODES
lowerCamelCase__ = ["""text""", """text""", """text"""]
lowerCamelCase__ = ["""text"""]
def A_ ( self , lowercase , lowercase , lowercase ):
if src_lang not in self.lang_to_code:
raise ValueError(F'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'''{tgt_lang} is not a supported language.''' )
_lowerCamelCase : str = self.lang_to_code[src_lang]
_lowerCamelCase : int = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowercase , return_tensors='pt' , src_lang=lowercase , tgt_lang=lowercase )
def A_ ( self , lowercase ):
return self.model.generate(**lowercase )
def A_ ( self , lowercase ):
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowercase ) | 12 | 0 |
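# Equivalent plain-pipelines call for the translation tool above, using the
# checkpoint named in the snippet and two codes from the LANGUAGE_CODES table:
from transformers import pipeline

translator = pipeline(
    "translation",
    model="facebook/nllb-200-distilled-600M",
    src_lang="eng_Latn",  # "English"
    tgt_lang="ron_Latn",  # "Romanian"
)
print(translator("The cat sat on the mat.")[0]["translation_text"])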
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class lowerCAmelCase__ ( PretrainedConfig ):
'''simple docstring'''
lowerCamelCase__ = 'switch_transformers'
lowerCamelCase__ = ['past_key_values']
lowerCamelCase__ = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self , lowercase=32128 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=3 , lowercase=12 , lowercase=3 , lowercase=12 , lowercase=8 , lowercase=False , lowercase=0.01 , lowercase="float32" , lowercase=False , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=0.0_01 , lowercase=0.0_01 , lowercase=1.0 , lowercase="relu" , lowercase=True , lowercase=False , lowercase=True , lowercase=0 , lowercase=1 , **lowercase , ):
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : Tuple = d_model
_lowerCamelCase : Optional[int] = d_kv
_lowerCamelCase : int = d_ff
_lowerCamelCase : str = num_sparse_encoder_layers
_lowerCamelCase : Union[str, Any] = num_layers
_lowerCamelCase : Optional[int] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_lowerCamelCase : Union[str, Any] = num_sparse_decoder_layers
        # This tells us after how many encoder layers we have to insert a sparse layer.
if self.num_sparse_encoder_layers > 0:
_lowerCamelCase : Union[str, Any] = self.num_layers // self.num_sparse_encoder_layers
else:
_lowerCamelCase : str = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us after how many decoder layers we have to insert a sparse layer.
if self.num_sparse_decoder_layers > 0:
_lowerCamelCase : Tuple = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
_lowerCamelCase : Tuple = self.num_decoder_layers # HACK: this will create 0 sparse layers
_lowerCamelCase : List[str] = num_heads
_lowerCamelCase : str = num_experts
_lowerCamelCase : int = expert_capacity
_lowerCamelCase : Dict = router_bias
_lowerCamelCase : List[Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
_lowerCamelCase : List[Any] = router_dtype
_lowerCamelCase : Dict = router_ignore_padding_tokens
_lowerCamelCase : Tuple = relative_attention_num_buckets
_lowerCamelCase : str = relative_attention_max_distance
_lowerCamelCase : List[str] = dropout_rate
_lowerCamelCase : str = layer_norm_epsilon
_lowerCamelCase : str = initializer_factor
_lowerCamelCase : Tuple = feed_forward_proj
_lowerCamelCase : Dict = use_cache
_lowerCamelCase : Optional[Any] = add_router_probs
_lowerCamelCase : List[str] = router_z_loss_coef
_lowerCamelCase : Dict = router_aux_loss_coef
_lowerCamelCase : int = self.feed_forward_proj.split('-' )
_lowerCamelCase : int = act_info[-1]
_lowerCamelCase : int = act_info[0] == 'gated'
if len(__lowerCAmelCase ) > 1 and act_info[0] != "gated" or len(__lowerCAmelCase ) > 2:
raise ValueError(
F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
_lowerCamelCase : Optional[int] = 'gelu_new'
super().__init__(
pad_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , **__lowerCAmelCase , ) | 359 |
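# Quick numeric check of the sparse-layer cadence computed above, assuming the
# public SwitchTransformersConfig this snippet mirrors (the attribute names are
# taken from that class, not from the obfuscated dump):
from transformers import SwitchTransformersConfig

config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
print(config.encoder_sparse_step)  # 4, i.e. every 4th encoder block is sparse
print(config.decoder_sparse_step)  # 4, the decoder defaults mirror the encoder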
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def A_ ( self , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
_lowerCamelCase : Tuple = VideoClassificationPipeline(model=lowercase , image_processor=lowercase , top_k=2 )
_lowerCamelCase : List[str] = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def A_ ( self , lowercase , lowercase ):
for example in examples:
_lowerCamelCase : Tuple = video_classifier(lowercase )
self.assertEqual(
lowercase , [
{'score': ANY(lowercase ), 'label': ANY(lowercase )},
{'score': ANY(lowercase ), 'label': ANY(lowercase )},
] , )
@require_torch
def A_ ( self ):
_lowerCamelCase : Optional[Any] = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
_lowerCamelCase : Tuple = VideoMAEFeatureExtractor(
size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} )
_lowerCamelCase : Dict = pipeline(
'video-classification' , model=lowercase , feature_extractor=lowercase , frame_sampling_rate=4 )
_lowerCamelCase : Any = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
_lowerCamelCase : Dict = video_classifier(lowercase , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}] , )
_lowerCamelCase : str = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
] , )
@require_tf
def A_ ( self ):
pass | 12 | 0 |
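# Non-test sketch of the video pipeline above; it needs `decord` installed.
# The VideoMAE checkpoint is an assumption (any video-classification model on
# the Hub works); the demo clip is the one the tests download.
from huggingface_hub import hf_hub_download
from transformers import pipeline

video = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
print(classifier(video, top_k=3))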
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class lowerCAmelCase__ ( PretrainedConfig ):
'''simple docstring'''
lowerCamelCase__ = """open-llama"""
def __init__( self , lowercase=100000 , lowercase=4096 , lowercase=11008 , lowercase=32 , lowercase=32 , lowercase="silu" , lowercase=2048 , lowercase=0.02 , lowercase=1E-6 , lowercase=True , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=True , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=True , lowercase=None , **lowercase , ):
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : str = hidden_size
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : Dict = num_attention_heads
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : Optional[Any] = rms_norm_eps
_lowerCamelCase : Dict = use_cache
_lowerCamelCase : Optional[Any] = kwargs.pop(
'use_memorry_efficient_attention' , _snake_case )
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_dropout_prob
_lowerCamelCase : Optional[Any] = use_stable_embedding
_lowerCamelCase : str = shared_input_output_embedding
_lowerCamelCase : List[str] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , tie_word_embeddings=_snake_case , **_snake_case , )
def A_ ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _snake_case ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
F'''got {self.rope_scaling}''' )
_lowerCamelCase : Optional[int] = self.rope_scaling.get('type' , _snake_case )
_lowerCamelCase : str = self.rope_scaling.get('factor' , _snake_case )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(_snake_case , _snake_case ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' ) | 360 |
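# Sketch of the rope_scaling validation above, assuming the public
# OpenLlamaConfig class this snippet corresponds to:
from transformers import OpenLlamaConfig

OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # validates fine
try:
    OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})
except ValueError as err:
    print(err)  # the factor field must be a float > 1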
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 12 | 0 |
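# What the lazy structure above buys you: importing the package stays cheap and
# torch-free; the heavy modeling module only loads on first attribute access.
# Assumes a transformers version that ships the MEGA model:
from transformers import MegaConfig, MegaModel  # first access triggers the real import

model = MegaModel(MegaConfig())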
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowercase__ = """\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
lowercase__ = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
lowercase__ = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute the corpus score from these aggregate counts.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
def A_ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
def A_ ( self , lowercase , lowercase , lowercase = 1 , lowercase = 4 , ):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowercase , hypotheses=lowercase , min_len=lowercase , max_len=lowercase )
}
| 361 |
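# The metric above is a thin wrapper over NLTK; the same score falls out of
# gleu_score directly:
from nltk.translate import gleu_score

hypothesis = "the cat sat on the mat".split()
reference = "the cat is on the mat".split()
print(round(gleu_score.corpus_gleu([[reference]], [hypothesis], min_len=1, max_len=4), 2))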
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    '''simple docstring'''

    config_cls = OPTConfig
    config_updates = {}
    hidden_act = 'gelu'
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=16 , lowercase=2 , lowercase=4 , lowercase=4 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , lowercase=16 , lowercase=16 , ):
_lowerCamelCase : Tuple = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : str = is_training
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : List[Any] = eos_token_id
_lowerCamelCase : Tuple = pad_token_id
_lowerCamelCase : List[str] = bos_token_id
_lowerCamelCase : Optional[int] = embed_dim
_lowerCamelCase : List[str] = word_embed_proj_dim
_lowerCamelCase : Any = False
def A_ ( self ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowerCamelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCamelCase : str = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowerCamelCase : Tuple = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase , **self.config_updates , )
_lowerCamelCase : int = prepare_opt_inputs_dict(lowercase , lowercase )
return config, inputs_dict
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : Optional[Any] = TFOPTModel(config=lowercase )
_lowerCamelCase : Optional[Any] = inputs_dict['input_ids']
_lowerCamelCase : str = input_ids[:1, :]
_lowerCamelCase : Dict = inputs_dict['attention_mask'][:1, :]
_lowerCamelCase : Optional[Any] = 1
# first forward pass
_lowerCamelCase : Any = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
_lowerCamelCase, _lowerCamelCase : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowerCamelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
_lowerCamelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
_lowerCamelCase : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase )[0]
_lowerCamelCase : List[str] = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowerCamelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowerCamelCase : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
_lowerCamelCase : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
@require_tf
class lowerCAmelCase__ ( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowerCamelCase__ = (TFOPTForCausalLM,) if is_tf_available() else ()
lowerCamelCase__ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = 10
def A_ ( self ):
_lowerCamelCase : int = TFOPTModelTester(self )
_lowerCamelCase : Tuple = ConfigTester(self , config_class=lowercase )
def A_ ( self ):
self.config_tester.run_common_tests()
def A_ ( self ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowercase , lowercase ):
if hasattr(lowercase , 'weight' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowercase , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_lowerCamelCase : Optional[int] = model_class(config=lowercase )
_lowerCamelCase : int = _get_word_embedding_weight(lowercase , model.get_input_embeddings() )
_lowerCamelCase : Tuple = _get_word_embedding_weight(lowercase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase )
_lowerCamelCase : str = _get_word_embedding_weight(lowercase , model.get_input_embeddings() )
_lowerCamelCase : Any = _get_word_embedding_weight(lowercase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_lowerCamelCase : Union[str, Any] = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowercase )
# check that weights remain the same after resizing
_lowerCamelCase : int = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_lowerCamelCase : Optional[Any] = False
self.assertTrue(lowercase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowercase )
_lowerCamelCase : Dict = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_lowerCamelCase : Union[str, Any] = False
self.assertTrue(lowercase )
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = 99
def A_ ( self ):
_lowerCamelCase : Tuple = tf.ones((4, 1) , dtype=tf.intaa ) * 2
_lowerCamelCase : Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_lowerCamelCase : int = input_ids.shape[0]
_lowerCamelCase : List[Any] = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self ):
_lowerCamelCase : Tuple = TFOPTModel.from_pretrained('facebook/opt-350m' )
_lowerCamelCase : List[Any] = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_lowerCamelCase : List[str] = tf.not_equal(lowercase , model.config.pad_token_id )
with tf.GradientTape():
_lowerCamelCase : List[str] = model(input_ids=lowercase , attention_mask=lowercase ).last_hidden_state
_lowerCamelCase : Optional[Any] = (1, 11, 512)
self.assertEqual(output.shape , lowercase )
_lowerCamelCase : List[str] = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-3 ) )
_lowerCamelCase : List[str] = tf.function(lowercase , jit_compile=lowercase )
_lowerCamelCase : Union[str, Any] = xla_generate(lowercase , lowercase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-2 ) )
@require_tf
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
super().setUp()
_lowerCamelCase : List[Any] = 'facebook/opt-350m'
def A_ ( self ):
_lowerCamelCase : int = TFOPTForCausalLM.from_pretrained(self.path_model )
_lowerCamelCase : List[Any] = GPTaTokenizer.from_pretrained(self.path_model )
_lowerCamelCase : List[str] = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' , padding=lowercase , add_special_tokens=lowercase )
_lowerCamelCase : Optional[int] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_lowerCamelCase : Any = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) )
_lowerCamelCase : Tuple = tf.function(lowercase , jit_compile=lowercase )
_lowerCamelCase : List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) )
@require_tf
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def A_ ( self ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def A_ ( self ):
_lowerCamelCase : str = 'facebook/opt-125m'
_lowerCamelCase : Dict = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(lowercase )
_lowerCamelCase : Dict = TFOPTForCausalLM.from_pretrained(lowercase )
for prompt in self.prompts:
_lowerCamelCase : int = tokenizer(lowercase , return_tensors='tf' ).input_ids
_lowerCamelCase : int = model.generate(lowercase , max_length=10 )
_lowerCamelCase : Any = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
predicted_outputs += generated_string
self.assertListEqual(lowercase , lowercase )
def A_ ( self ):
_lowerCamelCase : List[Any] = 'facebook/opt-350m'
_lowerCamelCase : int = GPTaTokenizer.from_pretrained(lowercase )
_lowerCamelCase : Optional[int] = TFOPTForCausalLM.from_pretrained(lowercase )
_lowerCamelCase : Any = 'left'
# use different length sentences to test batching
_lowerCamelCase : Optional[int] = [
'Hello, my dog is a little',
'Today, I',
]
_lowerCamelCase : Dict = tokenizer(lowercase , return_tensors='tf' , padding=lowercase )
_lowerCamelCase : int = inputs['input_ids']
_lowerCamelCase : Tuple = model.generate(input_ids=lowercase , attention_mask=inputs['attention_mask'] )
_lowerCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
_lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase )
_lowerCamelCase : Dict = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
_lowerCamelCase : int = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
_lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings )
_lowerCamelCase : List[Any] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
_lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase )
_lowerCamelCase : Optional[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase )
_lowerCamelCase : Optional[Any] = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] )
def A_ ( self ):
_lowerCamelCase : Tuple = 'facebook/opt-350m'
_lowerCamelCase : List[Any] = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Optional[Any] = GPTaTokenizer.from_pretrained(lowercase )
_lowerCamelCase : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase )
for prompt in self.prompts:
_lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' ).input_ids
_lowerCamelCase : Optional[Any] = model.generate(lowercase , max_length=10 )
_lowerCamelCase : Dict = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
predicted_outputs += generated_string
self.assertListEqual(lowercase , lowercase ) | 12 | 0 |
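# Standalone sketch of the generation path these tests exercise; it downloads
# the public facebook/opt-125m weights:
from transformers import GPT2Tokenizer, TFOPTForCausalLM

tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-125m")
model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")
input_ids = tokenizer("Today is a beautiful day and", return_tensors="tf").input_ids
output = model.generate(input_ids, max_length=10)
print(tokenizer.batch_decode(output, skip_special_tokens=True))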
"""simple docstring"""
def counting_sort(collection):
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how often each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i + coll_min the collection contains
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to beginning, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted)) | 362 |
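# Worked micro-example of the counting pass above for [3, 1, 2, 1]
# (coll_min is 1, so counts[i] tracks the value i + 1):
coll = [3, 1, 2, 1]
counts = [0] * (max(coll) + 1 - min(coll))
for n in coll:
    counts[n - 1] += 1  # occurrence counts -> [2, 1, 1]
for i in range(1, len(counts)):
    counts[i] += counts[i - 1]  # prefix sums -> [2, 3, 4]
print(counts)  # counts[i] == how many elements are <= i + 1
assert counting_sort(coll) == [1, 1, 2, 3]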
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class lowerCAmelCase__ ( PipelineTool ):
'''simple docstring'''
lowerCamelCase__ = """philschmid/bart-large-cnn-samsum"""
lowerCamelCase__ = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
lowerCamelCase__ = """summarizer"""
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSeqaSeqLM
lowerCamelCase__ = ["""text"""]
lowerCamelCase__ = ["""text"""]
def A_ ( self , lowercase ):
return self.pre_processor(lowercase , return_tensors='pt' , truncation=lowercase )
def A_ ( self , lowercase ):
return self.model.generate(**lowercase )[0]
def A_ ( self , lowercase ):
return self.pre_processor.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) | 12 | 0 |
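# Plain-pipeline equivalent of the tool above, using the checkpoint named in
# the snippet; the dialogue is a made-up SAMSum-style input:
from transformers import pipeline

summarizer = pipeline("summarization", model="philschmid/bart-large-cnn-samsum")
dialogue = "Anna: Lunch at noon? Ben: Sure, see you at the usual place."
print(summarizer(dialogue)[0]["summary_text"])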
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowercase__ = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class lowerCAmelCase__ ( BaseImageProcessor ):
'''simple docstring'''
lowerCamelCase__ = ['pixel_values']
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BILINEAR , lowercase = False , lowercase = 1 , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(**_SCREAMING_SNAKE_CASE )
_lowerCamelCase : int = size if size is not None else {"height": 384, "width": 384}
_lowerCamelCase : List[Any] = get_size_dict(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : List[Any] = do_resize
_lowerCamelCase : List[str] = size
_lowerCamelCase : Union[str, Any] = keep_aspect_ratio
_lowerCamelCase : Dict = ensure_multiple_of
_lowerCamelCase : Tuple = resample
_lowerCamelCase : str = do_rescale
_lowerCamelCase : Union[str, Any] = rescale_factor
_lowerCamelCase : str = do_normalize
_lowerCamelCase : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCamelCase : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self , lowercase , lowercase , lowercase = False , lowercase = 1 , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
_lowerCamelCase : Tuple = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
_lowerCamelCase : str = get_resize_output_image_size(
_SCREAMING_SNAKE_CASE , output_size=(size['height'], size['width']) , keep_aspect_ratio=_SCREAMING_SNAKE_CASE , multiple=_SCREAMING_SNAKE_CASE , )
return resize(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def A_ ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
_lowerCamelCase : List[Any] = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : List[Any] = size if size is not None else self.size
_lowerCamelCase : Any = get_size_dict(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_lowerCamelCase : Dict = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_lowerCamelCase : Optional[Any] = resample if resample is not None else self.resample
_lowerCamelCase : int = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : List[Any] = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : List[Any] = image_std if image_std is not None else self.image_std
_lowerCamelCase : str = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowerCamelCase : Any = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
_lowerCamelCase : Dict = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
_lowerCamelCase : Tuple = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
_lowerCamelCase : Optional[Any] = [self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE ) for image in images]
_lowerCamelCase : str = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
_lowerCamelCase : List[Any] = {"pixel_values": images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
def A_ ( self , lowercase , lowercase = None ):
_lowerCamelCase : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_SCREAMING_SNAKE_CASE ):
_lowerCamelCase : Any = target_sizes.numpy()
_lowerCamelCase : Union[str, Any] = []
for idx in range(len(_SCREAMING_SNAKE_CASE ) ):
_lowerCamelCase : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_SCREAMING_SNAKE_CASE )
else:
_lowerCamelCase : Any = logits.argmax(dim=1 )
_lowerCamelCase : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 363 |
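# A rough illustration of the resize options above (a sketch; the numbers are illustrative,
# not from the source): with size={"height": 384, "width": 384}, keep_aspect_ratio=True and
# ensure_multiple_of=32, a 640x480 input is scaled to fit the target while both output
# dimensions are rounded to multiples of 32, so no padding is needed downstream.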
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack( value , weight , capacity ):
    # Greedy: sort item indices by value-to-weight ratio, take whole items while they
    # fit, then take a fraction of the first item that does not.
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value : float = 0
    fractions : list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
    import doctest
    doctest.testmod()
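# A minimal usage sketch of the greedy above; the item values and weights are illustrative.
if __name__ == "__main__":
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    print(max_value)   # 240.0: items 0 and 1 taken whole, two thirds of item 2
    print(fractions)   # [1, 1, 0.6666666666666666]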
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """esm"""
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1026 , initializer_range=0.02 , layer_norm_eps=1E-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('No esmfold_config supplied for folding model, using default values.' )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , False ):
            raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )
    def to_dict( self ):
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output['esmfold_config'] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    '''simple docstring'''
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 1_28
    trunk: "TrunkConfig" = None
    def __post_init__( self ):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict( self ):
        output = asdict(self )
        output['trunk'] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    '''simple docstring'''
    num_blocks: int = 48
    sequence_state_dim: int = 10_24
    pairwise_state_dim: int = 1_28
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 1_28
    structure_module: "StructureModuleConfig" = None
    def __post_init__( self ):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
                F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
                F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
        if self.dropout >= 0.4:
            raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
    def to_dict( self ):
        output = asdict(self )
        output['structure_module'] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    '''simple docstring'''
    sequence_dim: int = 3_84
    pairwise_dim: int = 1_28
    ipa_dim: int = 16
    resnet_dim: int = 1_28
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict( self ):
        return asdict(self )
def get_default_vocab_list( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("""""", """|""", """|"""),
    datarow=DataRow("""""", """|""", """|"""),
    padding=1,
    with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
payload = [
    {
        """type""": """header""",
        """text""": {
            """type""": """plain_text""",
            """text""": F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            """emoji""": True,
        },
    }
]
total_num_failed = 0
for log in Path().glob("""*.log"""):
    section_num_failed = 0
    with open(log, """r""") as f:
        for line in f:
            line = json.loads(line)
            if line.get("""nodeid""", """""") != "":
                test = line["""nodeid"""]
                if line.get("""duration""", None) is not None:
                    duration = F"{line['duration']:.4f}"
                    if line.get("""outcome""", """""") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("""_""")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = """"""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += F"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("""::""")
                data[0] = data[0].split("""/""")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["""Test Location""", """Num Failed"""],
                tablefmt=hf_table_format,
                stralign="""right""",
            )
            message += F"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = """Too many failed tests, please see the full report in the Action results."""
        offset = len(err) + 10
        message = message[: 3000 - offset] + F"\n...\n```\n{err}"
    print(F"### {message}")
else:
    message = """No failed tests! 🤗"""
    print(F"## {message}")
    payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
    from slack_sdk import WebClient
    client = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
    if message != "No failed tests! 🤗":
        md_report = {
            """type""": """section""",
            """text""": {
                """type""": """mrkdwn""",
                """text""": message,
            },
        }
        payload.append(md_report)
        action_button = {
            """type""": """section""",
            """text""": {
                """type""": """mrkdwn""",
                """text""": """*For more details:*""",
            },
            """accessory""": {
                """type""": """button""",
                """text""": {
                    """type""": """plain_text""",
                    """text""": """Check Action results""",
                    """emoji""": True,
                },
                """url""": F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            """type""": """context""",
            """elements""": [
                {
                    """type""": """plain_text""",
                    """text""": F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
    response = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
    ts = response.data["""ts"""]
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = """"""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = """"""
            payload = {
                """type""": """section""",
                """text""": {
                    """type""": """mrkdwn""",
                    """text""": F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="""#accelerate-ci-daily""",
                thread_ts=ts,
                blocks=[payload],
            )
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavlm_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None ):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint['cfg'] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint['model'] )
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
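# Example invocation of the converter above (paths are illustrative):
#   python convert_wavlm_original_checkpoint.py \
#       --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base
# --config_path is optional; without it a default WavLMConfig is used, as in the function.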
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor( ProcessorMixin ):
    '''simple docstring'''
    tokenizer_class = """AutoTokenizer"""
    attributes = ["""tokenizer"""]
    preset_shape = {
        """semantic_prompt""": 1,
        """coarse_prompt""": 2,
        """fine_prompt""": 2,
    }
    def __init__( self , tokenizer , speaker_embeddings=None ):
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained( cls , pretrained_processor_name_or_path , speaker_embeddings_dict_path="speaker_embeddings_path.json" , **kwargs ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path , speaker_embeddings_dict_path , subfolder=kwargs.pop('subfolder' , None ) , cache_dir=kwargs.pop('cache_dir' , None ) , force_download=kwargs.pop('force_download' , False ) , proxies=kwargs.pop('proxies' , None ) , resume_download=kwargs.pop('resume_download' , False ) , local_files_only=kwargs.pop('local_files_only' , False ) , use_auth_token=kwargs.pop('use_auth_token' , None ) , revision=kwargs.pop('revision' , None ) , )
            if speaker_embeddings_path is None:
                logger.warning(
                    F'''`{os.path.join(pretrained_processor_name_or_path , speaker_embeddings_dict_path )}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path ) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json )
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path , **kwargs )
        return cls(tokenizer=tokenizer , speaker_embeddings=speaker_embeddings )
    def save_pretrained( self , save_directory , speaker_embeddings_dict_path="speaker_embeddings_path.json" , speaker_embeddings_directory="speaker_embeddings" , push_to_hub = False , **kwargs , ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory , speaker_embeddings_directory , 'v2' ) , exist_ok=True )
            embeddings_dict = {}
            embeddings_dict['repo_or_path'] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key )
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['repo_or_path'] , speaker_embeddings_directory , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=False , )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory , F'''{prompt_key}_{key}.npy''' )
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory , speaker_embeddings_dict_path ) , 'w' ) as fp:
                json.dump(embeddings_dict , fp )
        super().save_pretrained(save_directory , push_to_hub , **kwargs )
    def _load_voice_preset( self , voice_preset = None , **kwargs ):
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
            path = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , None ) , cache_dir=kwargs.pop('cache_dir' , None ) , force_download=kwargs.pop('force_download' , False ) , proxies=kwargs.pop('proxies' , None ) , resume_download=kwargs.pop('resume_download' , False ) , local_files_only=kwargs.pop('local_files_only' , False ) , use_auth_token=kwargs.pop('use_auth_token' , None ) , revision=kwargs.pop('revision' , None ) , )
            if path is None:
                raise ValueError(
                    F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )
            voice_preset_dict[key] = np.load(path )
        return voice_preset_dict
    def _validate_voice_preset_dict( self , voice_preset = None ):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
    def __call__( self , text=None , voice_preset=None , return_tensors="pt" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , **kwargs , ):
        if voice_preset is not None and not isinstance(voice_preset , dict ):
            if (
                isinstance(voice_preset , str )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset )
            else:
                if isinstance(voice_preset , str ) and not voice_preset.endswith('.npz' ):
                    voice_preset = voice_preset + '.npz'
                voice_preset = np.load(voice_preset )
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset , **kwargs )
            voice_preset = BatchFeature(data=voice_preset , tensor_type=return_tensors )
        encoded_text = self.tokenizer(
            text , return_tensors=return_tensors , padding='max_length' , max_length=max_length , return_attention_mask=return_attention_mask , return_token_type_ids=return_token_type_ids , add_special_tokens=add_special_tokens , **kwargs , )
        if voice_preset is not None:
            encoded_text['history_prompt'] = voice_preset
        return encoded_text
"""simple docstring"""
class Node:
    '''simple docstring'''
    def __init__( self , val ):
        self.val = val
        self.left = None
        self.right = None
    def insert( self , val ):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val
def inorder( root , res ):
    # Recursive traversal
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def tree_sort( arr ):
    # Build BST
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res
if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
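# A couple of extra sanity checks for tree_sort (inputs are illustrative):
if __name__ == "__main__":
    assert tree_sort([]) == []
    assert tree_sort([5, 2, 8, 2]) == [2, 5, 8]  # duplicates collapse: an equal key just overwrites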
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowercase__ = False
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_image_variations( self ):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
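        # To run just this slow test locally (illustrative; needs a CUDA GPU and the slow-test flag):
        #   RUN_SLOW=1 pytest <path-to-this-test-file> -k image_variations -sv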
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset( src_lang="ro" , tgt_lang="en" , dataset="wmt16" , save_dir=None ):
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError('run pip install datasets' )
    pair = f'''{src_lang}-{tgt_lang}'''
    print(f'''Converting {dataset}-{pair}''' )
    ds = datasets.load_dataset(dataset , pair )
    if save_dir is None:
        save_dir = f'''{dataset}-{pair}'''
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )
    for split in ds.keys():
        print(f'''Splitting {split} with {ds[split].num_rows} records''' )
        # to save to val.source, val.target like summary datasets
        fn = 'val' if split == 'validation' else split
        src_path = save_dir.joinpath(f'''{fn}.source''' )
        tgt_path = save_dir.joinpath(f'''{fn}.target''' )
        src_fp = src_path.open('w+' )
        tgt_fp = tgt_path.open('w+' )
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x['translation']
            src_fp.write(ex[src_lang] + '\n' )
            tgt_fp.write(ex[tgt_lang] + '\n' )
    print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
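# Example invocation via python-fire (flags mirror the signature above; the script name is
# illustrative, not from the source):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir ./wmt16-ro-en
# For wmt16 this writes train/val/test .source and .target files into save_dir.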
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count( message ):
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero( x ):
    return x[0]
def get_frequency_order( message ):
    letter_to_freq = get_letter_count(message )
    freq_to_letter : dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str : dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    freq_order : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score( message ):
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
    import doctest
    doctest.testmod()
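# A minimal usage sketch of the scorer above (the sample text is illustrative):
if __name__ == "__main__":
    sample = "Hello, this is plain English text for a quick frequency check."
    print(get_frequency_order(sample))       # the 26 letters, most frequent first
    print(english_freq_match_score(sample))  # 0..12; higher means closer to typical English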
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_bigcode"""] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
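# The _LazyModule indirection above makes importing the package cheap: the torch-backed
# classes are only materialised on first attribute access. A minimal sketch (assumes
# transformers is installed; the import path is the public one, not this file):
#   from transformers import GPTBigCodeConfig
#   config = GPTBigCodeConfig()  # the modeling module is still not imported at this point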
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    '''simple docstring'''
    def __init__( self , question_encoder , generator ):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained( self , save_directory ):
        if os.path.isfile(save_directory ):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , 'question_encoder_tokenizer' )
        generator_path = os.path.join(save_directory , 'generator_tokenizer' )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer
        config = kwargs.pop('config' , None )
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path )
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder='generator_tokenizer' )
        return cls(question_encoder=question_encoder , generator=generator )
    def __call__( self , *args , **kwargs ):
        return self.current_tokenizer(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        return self.generator.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.generator.decode(*args , **kwargs )
    def _switch_to_input_mode( self ):
        self.current_tokenizer = self.question_encoder
    def _switch_to_target_mode( self ):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch( self , src_texts , tgt_texts = None , max_length = None , max_target_length = None , padding = "longest" , return_tensors = None , truncation = True , **kwargs , ):
        warnings.warn(
            '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
            'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
            'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
            'details' , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=True , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs['labels'] = labels['input_ids']
        return model_inputs
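# A minimal usage sketch (the checkpoint is a real RAG checkpoint, but loading it downloads
# several files, so it is shown as a comment for illustration only):
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   inputs = tokenizer(["who holds the record in 100m freestyle"], return_tensors="pt")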
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto( a , b ):
    name_a = a.name
    name_b = b.name
    # compare with names blanked out, then restore them
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with( node_proto , name , new_name ):
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with( graph_proto , name , new_name ):
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ):
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers( onnx_file_path ):
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model )
    return new_model
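# A minimal usage sketch (the path is illustrative):
#   optimized_path = remove_dup_initializers("./model/encoder.onnx")
# The deduplicated model is written next to the input as "optimized_encoder.onnx", and the
# size saved by dropping duplicate initializers is printed in GB, as in the function above.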
"""simple docstring"""
def solution( n = 10 ):
    if not isinstance(n , int ) or n < 0:
        raise ValueError('Invalid input' )
    modulus = 10**n
    # pow(2, 7830457, modulus) is modular exponentiation, so the huge number
    # 28433 * 2**7830457 + 1 is never materialised; only its last n digits are kept.
    number = 28433 * (pow(2 , 7830457 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
    print(F"{solution(10) = }")
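    # For reference, solution(10) returns "8739992577" -- the last ten digits of
    # 28433 * 2**7830457 + 1 (Project Euler problem 97).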
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neox_japanese"""] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import datetime
def zeller( date_input ):
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError('Must be 10 characters long' )
    # Get month
    m : int = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12' )
    sep_a : str = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get day
    d : int = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31' )
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get year
    y : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?' )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c : int = int(str(y )[:2] )
    k : int = int(str(y )[2:] )
    t : int = int(2.6 * m - 5.39 )
    u : int = int(c / 4 )
    v : int = int(k / 4 )
    x : int = int(d + k )
    z : int = int(t + u + v + x )
    w : int = int(z - (2 * c) )
    f : int = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
    # Response
    response : str = f'''Your date {date_input}, is a {days[str(f )]}!'''
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
    args = parser.parse_args()
    zeller(args.date_input)
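    # Example run (the date is illustrative):
    #   $ python zeller.py 01-31-2010
    # zeller("01-31-2010") returns "Your date 01-31-2010, is a Sunday!"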
def solution( num = 1000000 ):
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2 , num ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
    print(solution(int(input().strip())))
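# For reference, solution(1000000) returns 837799, the classic Project Euler 14 answer;
# the counters dict memoises chain lengths so each Collatz tail is only walked once.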
"""simple docstring"""
import re
def indian_phone_validator( phone ):
    pat = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
    print(indian_phone_validator("""+918827897895"""))
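# A few extra illustrative checks against the regex above:
if __name__ == "__main__":
    assert indian_phone_validator("+91-9876543210")
    assert indian_phone_validator("09876543210")
    assert not indian_phone_validator("12345")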
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class BloomConfig( PretrainedConfig ):
'''simple docstring'''
lowerCamelCase__ = """bloom"""
lowerCamelCase__ = ["""past_key_values"""]
lowerCamelCase__ = {
"""num_hidden_layers""": """n_layer""",
"""num_attention_heads""": """n_head""",
}
    def __init__( self , vocab_size=250880 , hidden_size=64 , n_layer=2 , n_head=8 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=1 , eos_token_id=2 , apply_residual_connection_post_layernorm=False , hidden_dropout=0.0 , attention_dropout=0.0 , pretraining_tp=1 , slow_but_exact=False , **kwargs , ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
class BloomOnnxConfig( OnnxConfigWithPast ):
'''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.12""" )
def __init__( self , lowercase , lowercase = "default" , lowercase = None , lowercase = False , ):
super().__init__(lowercase , task=lowercase , patching_specs=lowercase , use_past=lowercase )
if not getattr(self._config , 'pad_token_id' , lowercase ):
# TODO: how to do that better?
_lowerCamelCase : Optional[int] = 0
    @property
    def inputs( self ):
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs , direction='inputs' , inverted_values_shape=True )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_layers( self ):
        return self._config.n_layer
    @property
    def num_attention_heads( self ):
        return self._config.n_head
    @property
    def atol_for_validation( self ):
        return 1E-3
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        common_inputs = super(BloomOnnxConfig , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_key_shape ), torch.zeros(past_value_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ):
        return 13
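# A minimal export sketch using the config above (class names as in transformers; the call
# values are illustrative, and `tokenizer` is assumed to be a loaded BLOOM tokenizer):
#   config = BloomConfig()
#   onnx_config = BloomOnnxConfig(config, task="default", use_past=True)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
# `dummy` then contains input_ids, past_key_values with the inverted value shape, and the
# extended attention_mask built in generate_dummy_inputs above.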
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
lowercase__ = """zero2"""
lowercase__ = """zero3"""
lowercase__ = [ZEROa, ZEROa]
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
_lowerCamelCase : List[str] = parameterized.to_safe_name('_'.join(str(lowercase__ ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
lowercase__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2( TestCasePlus ):
    '''simple docstring'''
    @parameterized.expand(params , name_func=parameterized_custom_name_func )
    def test_fp32_non_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=False , fpaa=False , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=parameterized_custom_name_func )
    def test_fp32_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=True , fpaa=False , )
    @parameterized.expand(params , name_func=parameterized_custom_name_func )
    def test_fp16_non_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=False , fpaa=True , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=parameterized_custom_name_func )
    def test_fp16_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=True , fpaa=True , )
    def do_checks( self , output_dir ):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass
    def run_and_check( self , stage , model , eval_steps = 10 , distributed = True , fpaa = True , quality_checks = True , ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage , model_name=model_name , eval_steps=eval_steps , num_train_epochs=1 , distributed=distributed , fpaa=fpaa , )
        self.do_checks(output_dir )
        return output_dir
    def run_trainer( self , stage , model_name , eval_steps = 10 , num_train_epochs = 1 , distributed = True , fpaa = True , ):
        output_dir = self.get_auto_remove_tmp_dir('./xxx' , after=False )
        args = F'''
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs )}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
            '''.split()
        if fpaa:
            args.extend(['--fp16'] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
        script = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
        launcher = self.get_launcher(distributed )
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd , env=self.get_env() )
        return output_dir
    def get_launcher( self , distributed=False ):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2 , get_gpu_count() ) if distributed else 1
        return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
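# To run this suite locally (illustrative; needs at least one GPU, deepspeed installed and
# the slow-test flag enabled):
#   RUN_SLOW=1 pytest <path-to-this-test-file> -sv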
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=True , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = BioGptModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        model = BioGptForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ):
_lowerCamelCase : Optional[Any] = BioGptModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
# create attention mask
_lowerCamelCase : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[Any] = self.seq_length // 2
_lowerCamelCase : Optional[Any] = 0
# first forward pass
_lowerCamelCase, _lowerCamelCase : Any = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).to_tuple()
        # create hypothetical next token and extend to next_input_ids
_lowerCamelCase : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
_lowerCamelCase : Optional[int] = ids_tensor((1,) , __SCREAMING_SNAKE_CASE ).item() + 1
_lowerCamelCase : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
_lowerCamelCase : Optional[Any] = random_other_next_tokens
# append to next input_ids and attn_mask
_lowerCamelCase : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )] , dim=1 , )
# get two different outputs
_lowerCamelCase : Tuple = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )['last_hidden_state']
_lowerCamelCase : Any = model(__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )['last_hidden_state']
# select random slice
_lowerCamelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase : List[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
_lowerCamelCase : List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ):
_lowerCamelCase : str = BioGptModel(config=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ).eval()
_lowerCamelCase : Dict = torch.ones(input_ids.shape , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
# first forward pass
_lowerCamelCase : Tuple = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )
_lowerCamelCase, _lowerCamelCase : Optional[int] = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
_lowerCamelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention mask
_lowerCamelCase : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase : int = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
_lowerCamelCase : List[Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )['last_hidden_state']
_lowerCamelCase : List[Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE )[
'last_hidden_state'
]
# select random slice
_lowerCamelCase : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase , lowercase=False ):
_lowerCamelCase : Any = BioGptForCausalLM(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
_lowerCamelCase : str = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def A_ ( self , lowercase , *lowercase ):
_lowerCamelCase : List[str] = BioGptModel(__SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ):
_lowerCamelCase : Union[str, Any] = self.num_labels
_lowerCamelCase : int = BioGptForTokenClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
_lowerCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
(
(
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
),
) : Tuple = config_and_inputs
_lowerCamelCase : int = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowerCamelCase__ = (BioGptForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
def A_ ( self ):
_lowerCamelCase : Optional[int] = BioGptModelTester(self )
_lowerCamelCase : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def A_ ( self ):
self.config_tester.run_common_tests()
def A_ ( self ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def A_ ( self ):
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase : List[str] = type
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def A_ ( self ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__SCREAMING_SNAKE_CASE )
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__SCREAMING_SNAKE_CASE , gradient_checkpointing=__SCREAMING_SNAKE_CASE )
def A_ ( self ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__SCREAMING_SNAKE_CASE )
def A_ ( self ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__SCREAMING_SNAKE_CASE )
def A_ ( self ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__SCREAMING_SNAKE_CASE )
@slow
def A_ ( self ):
_lowerCamelCase : List[str] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(__SCREAMING_SNAKE_CASE )
_lowerCamelCase : Dict = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
_lowerCamelCase : Dict = 'left'
        # Define PAD Token = EOS Token (BioGPT has no dedicated pad token)
_lowerCamelCase : Any = tokenizer.eos_token
_lowerCamelCase : List[str] = model.config.eos_token_id
# use different length sentences to test batching
_lowerCamelCase : Optional[Any] = [
'Hello, my dog is a little',
'Today, I',
]
_lowerCamelCase : str = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='pt' , padding=__SCREAMING_SNAKE_CASE )
_lowerCamelCase : Dict = inputs['input_ids'].to(__SCREAMING_SNAKE_CASE )
_lowerCamelCase : Union[str, Any] = model.generate(
input_ids=__SCREAMING_SNAKE_CASE , attention_mask=inputs['attention_mask'].to(__SCREAMING_SNAKE_CASE ) , )
_lowerCamelCase : Dict = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(__SCREAMING_SNAKE_CASE )
_lowerCamelCase : Union[str, Any] = model.generate(input_ids=__SCREAMING_SNAKE_CASE )
_lowerCamelCase : str = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
_lowerCamelCase : Dict = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(__SCREAMING_SNAKE_CASE )
_lowerCamelCase : Any = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_length=model.config.max_length - num_paddings )
_lowerCamelCase : Optional[Any] = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
_lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
_lowerCamelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
_lowerCamelCase : Any = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence] )
@slow
def A_ ( self ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Tuple = BioGptModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[int] = 3
_lowerCamelCase : Dict = input_dict['input_ids']
_lowerCamelCase : Tuple = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
_lowerCamelCase : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowerCamelCase : List[Any] = BioGptForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
_lowerCamelCase : Optional[int] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Any = 3
_lowerCamelCase : List[str] = 'multi_label_classification'
_lowerCamelCase : Optional[Any] = input_dict['input_ids']
_lowerCamelCase : List[Any] = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
_lowerCamelCase : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_lowerCamelCase : Union[str, Any] = BioGptForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
_lowerCamelCase : Any = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
_lowerCamelCase : Any = torch.tensor([[2, 4805, 9, 656, 21]] )
_lowerCamelCase : str = model(__SCREAMING_SNAKE_CASE )[0]
_lowerCamelCase : Any = 42384
_lowerCamelCase : str = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
_lowerCamelCase : Dict = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def A_ ( self ):
_lowerCamelCase : int = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
_lowerCamelCase : Any = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(__SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
_lowerCamelCase : Dict = tokenizer('COVID-19 is' , return_tensors='pt' ).to(__SCREAMING_SNAKE_CASE )
_lowerCamelCase : List[str] = model.generate(
**__SCREAMING_SNAKE_CASE , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=__SCREAMING_SNAKE_CASE , )
_lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[Any] = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) | 351 |
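
# A standalone sketch (not part of the tests above) of the scaled-initialization
# rule that `create_and_check_biogpt_weight_initialization` verifies for "c_proj"
# weights: residual output projections use a std shrunk by sqrt(2 * num_layers),
# the GPT-2 trick that keeps residual-stream variance bounded as depth grows.
# The numbers below are illustrative, not taken from a real config.
import math

def scaled_init_std(initializer_range: float, num_hidden_layers: int) -> float:
    return initializer_range / math.sqrt(2 * num_hidden_layers)

assert abs(scaled_init_std(0.02, 2) - 0.01) < 1e-12  # sqrt(2 * 2) == 2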
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = ["""pixel_values"""]
def __init__( self , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = 8 , **lowercase , ):
super().__init__(**lowercase )
_lowerCamelCase : Optional[Any] = do_rescale
_lowerCamelCase : Union[str, Any] = rescale_factor
_lowerCamelCase : Any = do_pad
_lowerCamelCase : Optional[int] = pad_size
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase = None ):
_lowerCamelCase, _lowerCamelCase : Tuple = get_image_size(lowercase )
_lowerCamelCase : Union[str, Any] = (old_height // size + 1) * size - old_height
_lowerCamelCase : Tuple = (old_width // size + 1) * size - old_width
return pad(lowercase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=lowercase )
def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
_lowerCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Any = do_pad if do_pad is not None else self.do_pad
_lowerCamelCase : int = pad_size if pad_size is not None else self.pad_size
_lowerCamelCase : Dict = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
_lowerCamelCase : Dict = [to_numpy_array(lowercase ) for image in images]
if do_rescale:
_lowerCamelCase : str = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_pad:
_lowerCamelCase : str = [self.pad(lowercase , size=lowercase ) for image in images]
_lowerCamelCase : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
_lowerCamelCase : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 12 | 0 |
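
# A standalone NumPy sketch (not part of the processor above) of the
# "pad to the next multiple of `size`" computation used by the `pad` method.
# Note the formula always adds at least one row and one column of padding,
# even when a side is already an exact multiple of `size`, mirroring the
# method above.
import numpy as np

def pad_to_multiple(image: np.ndarray, size: int = 8) -> np.ndarray:
    old_height, old_width = image.shape[:2]
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    return np.pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric")

assert pad_to_multiple(np.zeros((10, 13))).shape == (16, 16)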
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase ):
_lowerCamelCase : str = size
_lowerCamelCase : Any = [0] * size
_lowerCamelCase : Tuple = [0] * size
@staticmethod
def A_ ( lowercase ):
return index | (index + 1)
@staticmethod
def A_ ( lowercase ):
return (index & (index + 1)) - 1
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : int = value
while index < self.size:
_lowerCamelCase : Any = self.get_prev(lowercase ) + 1
if current_left_border == index:
_lowerCamelCase : int = value
else:
_lowerCamelCase : List[Any] = max(lowercase , lowercase , lowercase )
_lowerCamelCase : List[str] = self.get_next(lowercase )
def A_ ( self , lowercase , lowercase ):
        right -= 1  # because `right` is exclusive
_lowerCamelCase : Optional[Any] = 0
while left <= right:
_lowerCamelCase : Tuple = self.get_prev(lowercase )
if left <= current_left:
_lowerCamelCase : Tuple = max(lowercase , self.tree[right] )
_lowerCamelCase : Dict = current_left
else:
_lowerCamelCase : str = max(lowercase , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod() | 352 |
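
# A quick trace (not part of the class above) of the 0-indexed Fenwick-tree
# index moves used by `get_next` and `get_prev`, shown on index 5 (0b101):
# `get_next` jumps to the next node whose range covers the index, while
# `get_prev` returns the last index before the block ending at the current one.
assert (5 | (5 + 1)) == 7        # get_next(5): 0b101 | 0b110 -> 0b111
assert ((5 & (5 + 1)) - 1) == 3  # get_prev(5): (0b101 & 0b110) - 1 -> 0b011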
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["""arrow_begin"""] = KEYMAP["""up"""]
KEYMAP["""arrow_end"""] = KEYMAP["""left"""]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def _snake_case ( ):
if os.name == "nt":
import msvcrt
_lowerCamelCase : Any = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(lowercase__ ) == 0:
# Read the keystroke
_lowerCamelCase : str = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
_lowerCamelCase : List[Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
_lowerCamelCase : Union[str, Any] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(lowercase__ )
if ord(lowercase__ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
_lowerCamelCase : List[Any] = chr(KEYMAP['esc'] )
except KeyError:
_lowerCamelCase : int = cha[1]
else:
_lowerCamelCase : Optional[int] = ch.decode(lowercase__ )
else:
_lowerCamelCase : Union[str, Any] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
_lowerCamelCase : List[str] = sys.stdin.fileno()
_lowerCamelCase : Tuple = termios.tcgetattr(lowercase__ )
try:
tty.setraw(lowercase__ )
_lowerCamelCase : Optional[Any] = sys.stdin.read(1 )
finally:
termios.tcsetattr(lowercase__ , termios.TCSADRAIN , lowercase__ )
return ch
def _snake_case ( ):
_lowerCamelCase : int = get_raw_chars()
if ord(lowercase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(lowercase__ ) == KEYMAP["esc"]:
_lowerCamelCase : Union[str, Any] = get_raw_chars()
if ord(lowercase__ ) == KEYMAP["mod_int"]:
_lowerCamelCase : List[Any] = get_raw_chars()
if ord(lowercase__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowercase__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(lowercase__ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"] | 12 | 0 |
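
# A short illustration (not part of the module above) of why arrow keys are
# offset by ARROW_KEY_FLAG (1 << 8): raw arrow escape codes reuse the byte
# values of 'A'..'D' (65..68), so shifting them past the 8-bit character range
# lets a single int unambiguously encode either a printable char or an arrow.
assert ord("A") == 65        # plain character
assert 65 + (1 << 8) == 321  # "up" arrow, disjoint from every ord(ch) < 256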
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowercase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def _snake_case ( lowercase__ , lowercase__ ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_lowerCamelCase : Optional[int] = XLMProphetNetForConditionalGenerationOld.from_pretrained(_lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : int = XLMProphetNetForConditionalGeneration.from_pretrained(
_lowerCAmelCase , output_loading_info=_lowerCAmelCase )
else:
_lowerCamelCase : Optional[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(_lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : int = ProphetNetForConditionalGeneration.from_pretrained(
_lowerCAmelCase , output_loading_info=_lowerCAmelCase )
_lowerCamelCase : int = ['key_proj', 'value_proj', 'query_proj']
_lowerCamelCase : List[Any] = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
_lowerCamelCase : Any = key.split('.' )
if attributes[0] == "lm_head":
_lowerCamelCase : Any = prophet
_lowerCamelCase : Any = prophet_old
else:
_lowerCamelCase : Optional[Any] = prophet.prophetnet
_lowerCamelCase : Dict = prophet_old.model
_lowerCamelCase : List[Any] = False
for attribute in attributes:
if attribute in mapping:
_lowerCamelCase : Union[str, Any] = mapping[attribute]
if not hasattr(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) > 0:
_lowerCamelCase : Optional[int] = attribute
elif hasattr(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Tuple = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_lowerCamelCase : Optional[int] = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
_lowerCamelCase : Optional[int] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_lowerCamelCase : int = old_model.bias
logger.info(f'''{attribute} is initialized''' )
_lowerCamelCase : List[str] = True
break
elif attribute in special_keys and hasattr(_lowerCAmelCase , 'in_proj_weight' ):
_lowerCamelCase : Tuple = old_model.in_proj_weight.shape[0] // 3
_lowerCamelCase : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_lowerCamelCase : List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_lowerCamelCase : Optional[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_lowerCamelCase : Dict = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_lowerCamelCase : Any = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_lowerCamelCase : Optional[Any] = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_lowerCamelCase : Dict = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_lowerCamelCase : str = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
_lowerCamelCase : Optional[int] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
_lowerCamelCase : Optional[int] = True
break
if attribute.isdigit():
_lowerCamelCase : Tuple = model[int(_lowerCAmelCase )]
_lowerCamelCase : Dict = old_model[int(_lowerCAmelCase )]
else:
_lowerCamelCase : Any = getattr(_lowerCAmelCase , _lowerCAmelCase )
if old_attribute == "":
_lowerCamelCase : List[str] = old_model
else:
if not hasattr(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
_lowerCamelCase : List[str] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowercase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path) | 353 |
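
# A standalone sketch (not part of the script above) of the fused-attention
# split the conversion performs: old fairseq-style checkpoints store a single
# `in_proj_weight` of shape (3 * embed_dim, embed_dim) that is sliced into
# separate query/key/value projections.
import torch

def split_in_proj(in_proj_weight: torch.Tensor):
    embed_dim = in_proj_weight.shape[0] // 3
    query = in_proj_weight[:embed_dim, :]
    key = in_proj_weight[embed_dim : 2 * embed_dim, :]
    value = in_proj_weight[2 * embed_dim :, :]
    return query, key, value

q, k, v = split_in_proj(torch.zeros(12, 4))
assert q.shape == k.shape == v.shape == (4, 4)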
"""simple docstring"""
from typing import Any
def _snake_case ( lowercase__ ):
if not input_list:
return []
_lowerCamelCase : Any = [input_list.count(lowercase__ ) for value in input_list]
_lowerCamelCase : Dict = max(lowercase__ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowercase__ ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod() | 12 | 0 |
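
# The same mode computation (not part of the function above) with readable
# local names, showing how the obfuscated locals map onto per-element counts,
# the maximum count, and the de-duplicated, sorted result.
def mode_example(values: list) -> list:
    if not values:
        return []
    counts = [values.count(v) for v in values]
    max_count = max(counts)
    return sorted({values[i] for i, c in enumerate(counts) if c == max_count})

assert mode_example([2, 3, 4, 5, 3, 4, 2]) == [2, 3, 4]
assert mode_example([]) == []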
"""simple docstring"""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def _snake_case ( lowercase__ ):
if isinstance(_A , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class lowerCAmelCase__ :
'''simple docstring'''
def A_ ( self , lowercase , lowercase ):
pass
def A_ ( self ):
pass
def A_ ( self ):
pass
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ):
_lowerCamelCase : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCamelCase : Any = TFVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Tuple = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ):
_lowerCamelCase, _lowerCamelCase : int = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCamelCase : Tuple = TFVisionTextDualEncoderModel(vision_model=_SCREAMING_SNAKE_CASE , text_model=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[int] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ):
_lowerCamelCase, _lowerCamelCase : int = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCamelCase : str = {'vision_model': vision_model, 'text_model': text_model}
_lowerCamelCase : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
_lowerCamelCase : int = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ):
_lowerCamelCase, _lowerCamelCase : Any = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCamelCase : Tuple = TFVisionTextDualEncoderModel(vision_model=_SCREAMING_SNAKE_CASE , text_model=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Any = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Union[str, Any] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : List[str] = TFVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : int = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Union[str, Any] = after_output[0].numpy()
_lowerCamelCase : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-5 )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ):
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCamelCase : int = TFVisionTextDualEncoderModel(vision_model=_SCREAMING_SNAKE_CASE , text_model=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Dict = model(
input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[int] = output.vision_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCamelCase : Dict = to_atuple(vision_model.config.image_size )
_lowerCamelCase : int = to_atuple(vision_model.config.patch_size )
_lowerCamelCase : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCamelCase : Optional[Any] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCamelCase : Tuple = output.text_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A_ ( self , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[Any] = np.abs((a - b) ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def A_ ( self ):
_lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_SCREAMING_SNAKE_CASE )
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_SCREAMING_SNAKE_CASE )
def A_ ( self ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_SCREAMING_SNAKE_CASE )
def A_ ( self ):
_lowerCamelCase : Tuple = self.prepare_config_and_inputs()
self.check_save_load(**_SCREAMING_SNAKE_CASE )
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_SCREAMING_SNAKE_CASE )
@slow
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase : Tuple = self.get_pretrained_model_and_inputs()
_lowerCamelCase : Union[str, Any] = model_a(**_SCREAMING_SNAKE_CASE )
_lowerCamelCase : int = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[Any] = model_a(**_SCREAMING_SNAKE_CASE )
_lowerCamelCase : List[Any] = after_outputs[0].numpy()
_lowerCamelCase : Tuple = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-5 )
@require_tf
class lowerCAmelCase__ ( lowerCAmelCase__, unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
_lowerCamelCase : Union[str, Any] = 13
_lowerCamelCase : Union[str, Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCamelCase : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCamelCase : Optional[Any] = random_attention_mask([batch_size, 4] )
_lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : Any = TFViTModel(_SCREAMING_SNAKE_CASE , name='vision_model' )
_lowerCamelCase : Dict = TFBertModel(_SCREAMING_SNAKE_CASE , name='text_model' )
return vision_model, text_model
def A_ ( self ):
_lowerCamelCase : List[Any] = TFViTModelTester(self )
_lowerCamelCase : int = TFBertModelTester(self )
_lowerCamelCase : int = vit_model_tester.prepare_config_and_inputs()
_lowerCamelCase : int = bert_model_tester.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = vision_config_and_inputs
(
(
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
),
) : List[Any] = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class lowerCAmelCase__ ( lowerCAmelCase__, unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
_lowerCamelCase : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
_lowerCamelCase : Dict = 13
_lowerCamelCase : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCamelCase : Optional[int] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCamelCase : List[str] = random_attention_mask([batch_size, 4] )
_lowerCamelCase : Any = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ):
_lowerCamelCase, _lowerCamelCase : int = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[int] = TFVisionTextDualEncoderModel(vision_model=_SCREAMING_SNAKE_CASE , text_model=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[int] = model(
input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[int] = output.vision_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCamelCase : List[Any] = to_atuple(vision_model.config.image_size )
_lowerCamelCase : Optional[Any] = to_atuple(vision_model.config.patch_size )
_lowerCamelCase : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCamelCase : Any = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCamelCase : Dict = output.text_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : Tuple = TFDeiTModel(_SCREAMING_SNAKE_CASE , name='vision_model' )
_lowerCamelCase : Union[str, Any] = TFRobertaModel(_SCREAMING_SNAKE_CASE , name='text_model' )
return vision_model, text_model
def A_ ( self ):
_lowerCamelCase : int = TFDeiTModelTester(self )
_lowerCamelCase : Dict = TFRobertaModelTester(self )
_lowerCamelCase : Union[str, Any] = vit_model_tester.prepare_config_and_inputs()
_lowerCamelCase : str = bert_model_tester.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = vision_config_and_inputs
(
(
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
),
) : int = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class lowerCAmelCase__ ( lowerCAmelCase__, unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
_lowerCamelCase : Optional[Any] = 13
_lowerCamelCase : Dict = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCamelCase : Union[str, Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCamelCase : Any = random_attention_mask([batch_size, 4] )
_lowerCamelCase : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : Union[str, Any] = TFCLIPVisionModel(_SCREAMING_SNAKE_CASE , name='vision_model' )
_lowerCamelCase : List[Any] = TFBertModel(_SCREAMING_SNAKE_CASE , name='text_model' )
return vision_model, text_model
def A_ ( self ):
_lowerCamelCase : List[str] = TFCLIPVisionModelTester(self )
_lowerCamelCase : List[Any] = TFBertModelTester(self )
_lowerCamelCase : Any = clip_model_tester.prepare_config_and_inputs()
_lowerCamelCase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase : Optional[Any] = vision_config_and_inputs
(
(
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
),
) : Optional[Any] = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self ):
_lowerCamelCase : List[str] = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[int] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
_lowerCamelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCamelCase : Dict = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors='np' )
_lowerCamelCase : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_lowerCamelCase : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _SCREAMING_SNAKE_CASE , atol=1E-3 ) ) | 354 |
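
# A standalone sketch (not part of the tests above) of the sequence-length
# arithmetic the attention-shape checks rely on, simplified to square images:
# an image cut into P x P patches yields (H // P) * (W // P) patch tokens,
# plus one [CLS] token for ViT and one extra distillation token for DeiT.
def vision_seq_len(image_size: int, patch_size: int, extra_tokens: int = 1) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches + extra_tokens

assert vision_seq_len(224, 16) == 197                  # ViT: 196 patches + [CLS]
assert vision_seq_len(224, 16, extra_tokens=2) == 198  # DeiT adds a distillation token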
"""simple docstring"""
def _snake_case ( lowercase__ ):
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
_lowerCamelCase : List[str] = len(lowercase__ )
_lowerCamelCase : List[str] = max(lowercase__ )
_lowerCamelCase : List[str] = min(lowercase__ )
# create the counting array
_lowerCamelCase : List[Any] = coll_max + 1 - coll_min
_lowerCamelCase : List[Any] = [0] * counting_arr_length
    # count how many times each number appears in the collection
    for number in collection:
counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i there are in the collection
for i in range(1 , lowercase__ ):
_lowerCamelCase : Optional[int] = counting_arr[i] + counting_arr[i - 1]
# create the output collection
_lowerCamelCase : Dict = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort), iterating from end to beginning while updating counting_arr
for i in reversed(range(0 , lowercase__ ) ):
_lowerCamelCase : Any = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def _snake_case ( lowercase__ ):
return "".join([chr(lowercase__ ) for i in counting_sort([ord(lowercase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
lowercase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase__ = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted)) | 12 | 0 |
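
# A worked trace (not part of the module above) of the two core steps for the
# input [3, 1, 2, 1] (coll_min=1, coll_max=3).
counts = [0, 0, 0]
for n in [3, 1, 2, 1]:
    counts[n - 1] += 1          # raw counts:  [2, 1, 1]
for i in range(1, len(counts)):
    counts[i] += counts[i - 1]  # prefix sums: [2, 3, 4], i.e. #elements <= i + min
assert counts == [2, 3, 4]
# Placing elements from the end of the input keeps equal keys in their original
# order, which is what makes the sort stable.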
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
) | 355 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
lowercase__ = parser.parse_args()
lowercase__ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 12 | 0 |
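
# Example invocation of the converter above. The script name and all paths are
# placeholders chosen for illustration; only the flags correspond to arguments
# actually defined by the parser above.
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --scheduler_type pndm \
#       --image_size 512 \
#       --dump_path ./converted-pipeline \
#       --half --to_safetensors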
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : str = mock.Mock()
_lowerCamelCase : Dict = 500
_lowerCamelCase : Dict = {}
_lowerCamelCase : List[Any] = HTTPError
_lowerCamelCase : Tuple = {}
# Download this model to make sure it's in the cache.
_lowerCamelCase : Any = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=lowerCamelCase__ ) as mock_head:
_lowerCamelCase : Optional[Any] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A_ ( self ):
_lowerCamelCase : Optional[Any] = mock.Mock()
_lowerCamelCase : List[Any] = 500
_lowerCamelCase : Optional[Any] = {}
_lowerCamelCase : Any = HTTPError
_lowerCamelCase : Dict = {}
# Download this model to make sure it's in the cache.
_lowerCamelCase : List[Any] = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=lowerCamelCase__ ) as mock_head:
_lowerCamelCase : List[str] = GPTaTokenizerFast.from_pretrained('gpt2' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def A_ ( self ):
try:
_lowerCamelCase : List[Any] = tempfile.mktemp()
with open(lowerCamelCase__ , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , lowerCamelCase__ )
_lowerCamelCase : List[Any] = AlbertTokenizer.from_pretrained(lowerCamelCase__ )
finally:
os.remove(lowerCamelCase__ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , lowerCamelCase__ )
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def A_ ( self ):
_lowerCamelCase : Optional[int] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A_ ( cls ):
_lowerCamelCase : Any = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def A_ ( cls ):
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def A_ ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Optional[int] = os.path.join(lowerCamelCase__ , 'vocab.txt' )
with open(lowerCamelCase__ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCamelCase : Optional[Any] = BertTokenizer(lowerCamelCase__ )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
_lowerCamelCase : Any = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase__ , repo_id='test-tokenizer' , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_lowerCamelCase : Dict = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def A_ ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Optional[int] = os.path.join(lowerCamelCase__ , 'vocab.txt' )
with open(lowerCamelCase__ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCamelCase : List[str] = BertTokenizer(lowerCamelCase__ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
_lowerCamelCase : Optional[int] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase__ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_lowerCamelCase : Any = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def A_ ( self ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Dict = os.path.join(lowerCamelCase__ , 'vocab.txt' )
with open(lowerCamelCase__ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCamelCase : int = CustomTokenizer(lowerCamelCase__ )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : int = os.path.join(lowerCamelCase__ , 'vocab.txt' )
with open(lowerCamelCase__ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCamelCase : Any = BertTokenizerFast.from_pretrained(lowerCamelCase__ )
bert_tokenizer.save_pretrained(lowerCamelCase__ )
_lowerCamelCase : str = CustomTokenizerFast.from_pretrained(lowerCamelCase__ )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' , use_fast=lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : str = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def A_ ( self ):
_lowerCamelCase : Tuple = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def A_ ( self ):
_lowerCamelCase : int = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def A_ ( self ):
_lowerCamelCase : str = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def A_ ( self ):
_lowerCamelCase : List[Any] = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def A_ ( self ):
_lowerCamelCase : Any = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def A_ ( self ):
_lowerCamelCase : List[Any] = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = Trie()
_lowerCamelCase : Union[str, Any] = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCamelCase__ , ['AB', 'C'] ) | 356 |
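For reference, the Trie exercised above is the real transformers.tokenization_utils.Trie; here is a minimal, runnable sketch of the split behavior the assertions check (the tokens added below are illustrative choices, not taken from the test suite):

# Hedged sketch: the same Trie API as in the tests above, de-obfuscated.
from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_100")
# split() cuts the text on the longest registered tokens it can match.
print(trie.split("[CLS] This is a extra_id_100"))
# -> ['[CLS]', ' This is a ', 'extra_id_100']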
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = (UnCLIPScheduler,)
def A_ ( self , **lowercase ):
_lowerCamelCase : Any = {
'num_train_timesteps': 1000,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**lowercase )
return config
def A_ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase )
def A_ ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowercase )
def A_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase )
def A_ ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowercase )
def A_ ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowercase )
def A_ ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowercase , prev_timestep=lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' )
_lowerCamelCase : str = scheduler_class(**lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5
def A_ ( self ):
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' )
_lowerCamelCase : int = scheduler_class(**lowercase )
_lowerCamelCase : List[str] = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5
def A_ ( self ):
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config()
_lowerCamelCase : Tuple = scheduler_class(**lowercase )
_lowerCamelCase : Union[str, Any] = scheduler.timesteps
_lowerCamelCase : Any = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
_lowerCamelCase : Optional[int] = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : Tuple = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : Optional[int] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def A_ ( self ):
_lowerCamelCase : Tuple = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Optional[Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(25 )
_lowerCamelCase : Optional[Any] = scheduler.timesteps
_lowerCamelCase : Optional[int] = self.dummy_model()
_lowerCamelCase : Any = self.dummy_sample_deter
_lowerCamelCase : str = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : List[Any] = model(lowercase , lowercase )
if i + 1 == timesteps.shape[0]:
_lowerCamelCase : Optional[int] = None
else:
_lowerCamelCase : List[str] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Union[str, Any] = scheduler.step(
lowercase , lowercase , lowercase , prev_timestep=lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : List[Any] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def A_ ( self ):
pass
def A_ ( self ):
pass | 12 | 0 |
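De-obfuscated, the full-loop test above is the standard denoising loop; a minimal sketch assuming the diffusers UnCLIPScheduler API that the test calls into (the random "model output" is a placeholder for a real UNet prediction, so this is not a working pipeline):

# Hedged sketch of the loop the test drives manually.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
sample = torch.randn(1, 3, 32, 32)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample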
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ = 'data2vec-audio'
def __init__( self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=16 , lowercase=19 , lowercase=5 , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=256 , lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ )
_lowerCamelCase : int = hidden_size
_lowerCamelCase : Any = feat_extract_activation
_lowerCamelCase : Dict = list(lowercase__ )
_lowerCamelCase : List[Any] = list(lowercase__ )
_lowerCamelCase : Tuple = list(lowercase__ )
_lowerCamelCase : Union[str, Any] = conv_bias
_lowerCamelCase : Any = num_conv_pos_embeddings
_lowerCamelCase : Union[str, Any] = num_conv_pos_embedding_groups
_lowerCamelCase : int = conv_pos_kernel_size
_lowerCamelCase : Dict = len(self.conv_dim )
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : Any = hidden_dropout
_lowerCamelCase : Optional[Any] = attention_dropout
_lowerCamelCase : Any = activation_dropout
_lowerCamelCase : Tuple = feat_proj_dropout
_lowerCamelCase : int = final_dropout
_lowerCamelCase : List[Any] = layerdrop
_lowerCamelCase : Dict = layer_norm_eps
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Optional[Any] = vocab_size
_lowerCamelCase : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : List[str] = mask_time_prob
_lowerCamelCase : Dict = mask_time_length
_lowerCamelCase : Optional[int] = mask_time_min_masks
_lowerCamelCase : int = mask_feature_prob
_lowerCamelCase : List[str] = mask_feature_length
_lowerCamelCase : Union[str, Any] = mask_feature_min_masks
# ctc loss
_lowerCamelCase : int = ctc_loss_reduction
_lowerCamelCase : Any = ctc_zero_infinity
# adapter
_lowerCamelCase : Dict = add_adapter
_lowerCamelCase : Union[str, Any] = adapter_kernel_size
_lowerCamelCase : List[str] = adapter_stride
_lowerCamelCase : List[Any] = num_adapter_layers
_lowerCamelCase : Tuple = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCamelCase : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCamelCase : Union[str, Any] = list(lowercase__ )
_lowerCamelCase : str = list(lowercase__ )
_lowerCamelCase : List[str] = list(lowercase__ )
_lowerCamelCase : Tuple = xvector_output_dim
@property
def A_ ( self ):
return math.prod(self.conv_stride ) | 357 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """data2vec-audio"""
def __init__( self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=16 , lowercase=19 , lowercase=5 , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=256 , lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
_lowerCamelCase : str = hidden_size
_lowerCamelCase : str = feat_extract_activation
_lowerCamelCase : Optional[Any] = list(lowercase )
_lowerCamelCase : Dict = list(lowercase )
_lowerCamelCase : Dict = list(lowercase )
_lowerCamelCase : Optional[Any] = conv_bias
_lowerCamelCase : Union[str, Any] = num_conv_pos_embeddings
_lowerCamelCase : List[Any] = num_conv_pos_embedding_groups
_lowerCamelCase : List[Any] = conv_pos_kernel_size
_lowerCamelCase : Optional[int] = len(self.conv_dim )
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Any = hidden_dropout
_lowerCamelCase : Union[str, Any] = attention_dropout
_lowerCamelCase : str = activation_dropout
_lowerCamelCase : Any = feat_proj_dropout
_lowerCamelCase : Tuple = final_dropout
_lowerCamelCase : Union[str, Any] = layerdrop
_lowerCamelCase : List[Any] = layer_norm_eps
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : Tuple = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Optional[Any] = mask_time_prob
_lowerCamelCase : List[Any] = mask_time_length
_lowerCamelCase : List[Any] = mask_time_min_masks
_lowerCamelCase : Tuple = mask_feature_prob
_lowerCamelCase : Optional[Any] = mask_feature_length
_lowerCamelCase : Dict = mask_feature_min_masks
# ctc loss
_lowerCamelCase : Tuple = ctc_loss_reduction
_lowerCamelCase : str = ctc_zero_infinity
# adapter
_lowerCamelCase : Union[str, Any] = add_adapter
_lowerCamelCase : List[Any] = adapter_kernel_size
_lowerCamelCase : Optional[Any] = adapter_stride
_lowerCamelCase : List[Any] = num_adapter_layers
_lowerCamelCase : int = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCamelCase : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCamelCase : List[str] = list(lowercase )
_lowerCamelCase : Optional[Any] = list(lowercase )
_lowerCamelCase : Any = list(lowercase )
_lowerCamelCase : Optional[Any] = xvector_output_dim
@property
def A_ ( self ):
return math.prod(self.conv_stride ) | 12 | 0 |
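Both copies above end with the property known upstream as inputs_to_logits_ratio; a short sketch of what it computes, assuming the released transformers Data2VecAudioConfig:

# Hedged sketch: math.prod(conv_stride) is the total downsampling factor
# from raw audio samples to feature-extractor output frames.
from transformers import Data2VecAudioConfig

config = Data2VecAudioConfig()  # conv_stride defaults to (5, 2, 2, 2, 2, 2, 2)
print(config.inputs_to_logits_ratio)  # 5 * 2**6 == 320 raw samples per frame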
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 358 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
lowercase__ = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """facebook/nllb-200-distilled-600M"""
lowerCamelCase__ = (
"""This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
"""be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
"""which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """
"""plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
)
lowerCamelCase__ = """translator"""
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSeqaSeqLM
lowerCamelCase__ = LANGUAGE_CODES
lowerCamelCase__ = ["""text""", """text""", """text"""]
lowerCamelCase__ = ["""text"""]
def A_ ( self , lowercase , lowercase , lowercase ):
if src_lang not in self.lang_to_code:
raise ValueError(F'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'''{tgt_lang} is not a supported language.''' )
_lowerCamelCase : str = self.lang_to_code[src_lang]
_lowerCamelCase : int = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowercase , return_tensors='pt' , src_lang=lowercase , tgt_lang=lowercase )
def A_ ( self , lowercase ):
return self.model.generate(**lowercase )
def A_ ( self , lowercase ):
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowercase ) | 12 | 0 |
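The tool above only maps plain-English names to NLLB codes before delegating to the checkpoint; a minimal sketch of the same translation done directly (the checkpoint name comes from the tool, the rest is the standard transformers seq2seq API):

# Hedged sketch of what the translator does under the hood: encode with a
# source language, force the target language code as the first generated token.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
inputs = tokenizer("The weather is nice today.", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"))
print(tokenizer.decode(generated[0], skip_special_tokens=True))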
lowercase__ = """Tobias Carryer"""
from time import time
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase=int(time() ) ): # noqa: B008
_lowerCamelCase : Dict = multiplier
_lowerCamelCase : Any = increment
_lowerCamelCase : Dict = modulo
_lowerCamelCase : str = seed
def A_ ( self ):
_lowerCamelCase : Dict = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
lowercase__ = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number()) | 359 |
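The underscored literals above decode to the classic Numerical Recipes LCG parameters (multiplier 1664525, increment 1013904223, modulus 2 << 31 == 2**32); a de-obfuscated sketch under that reading:

# Hedged, de-obfuscated reading of the generator above (the name mapping is assumed).
class LinearCongruentialGenerator:
    def __init__(self, multiplier, increment, modulo, seed=0):
        self.multiplier, self.increment, self.modulo, self.seed = multiplier, increment, modulo, seed

    def next_number(self):
        # X_{n+1} = (a * X_n + c) mod m
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed

lcg = LinearCongruentialGenerator(1664525, 1013904223, 2**32, seed=42)
print([lcg.next_number() for _ in range(3)])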
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def A_ ( self , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
_lowerCamelCase : Tuple = VideoClassificationPipeline(model=lowercase , image_processor=lowercase , top_k=2 )
_lowerCamelCase : List[str] = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def A_ ( self , lowercase , lowercase ):
for example in examples:
_lowerCamelCase : Tuple = video_classifier(lowercase )
self.assertEqual(
lowercase , [
{'score': ANY(lowercase ), 'label': ANY(lowercase )},
{'score': ANY(lowercase ), 'label': ANY(lowercase )},
] , )
@require_torch
def A_ ( self ):
_lowerCamelCase : Optional[Any] = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
_lowerCamelCase : Tuple = VideoMAEFeatureExtractor(
size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} )
_lowerCamelCase : Dict = pipeline(
'video-classification' , model=lowercase , feature_extractor=lowercase , frame_sampling_rate=4 )
_lowerCamelCase : Any = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
_lowerCamelCase : Dict = video_classifier(lowercase , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}] , )
_lowerCamelCase : str = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
] , )
@require_tf
def A_ ( self ):
pass | 12 | 0 |
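A runnable version of the tiny-checkpoint call the test makes; the checkpoint, demo clip, and feature-extractor sizes all come from the test itself, and decord must be installed for video decoding:

# Hedged sketch mirroring the test: tiny random VideoMAE + the archery demo clip.
from huggingface_hub import hf_hub_download
from transformers import VideoMAEFeatureExtractor, pipeline

extractor = VideoMAEFeatureExtractor(size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10})
classifier = pipeline(
    "video-classification",
    model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
    feature_extractor=extractor,
)
video_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
print(classifier(video_path, top_k=2))  # two {'score': ..., 'label': ...} dicts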
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowercase__ = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""SpeechEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""FlaxSpeechEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 360 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase__ = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 12 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
lowercase__ = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
lowercase__ = {
"RUCAIBox/mvp": 1024,
}
class lowerCAmelCase__ ( UpperCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["""input_ids""", """attention_mask"""]
lowerCamelCase__ = MvpTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_lowerCamelCase : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _a ) != add_prefix_space:
_lowerCamelCase : Union[str, Any] = getattr(_a , pre_tok_state.pop('type' ) )
_lowerCamelCase : Union[str, Any] = add_prefix_space
_lowerCamelCase : str = pre_tok_class(**_a )
_lowerCamelCase : List[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = """post_processor"""
_lowerCamelCase : str = getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_lowerCamelCase : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : str = tuple(state['sep'] )
if "cls" in state:
_lowerCamelCase : str = tuple(state['cls'] )
_lowerCamelCase : str = False
if state.get('add_prefix_space' , _a ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : List[Any] = True
if state.get('trim_offsets' , _a ) != trim_offsets:
_lowerCamelCase : Any = trim_offsets
_lowerCamelCase : Tuple = True
if changes_to_apply:
_lowerCamelCase : Optional[int] = getattr(_a , state.pop('type' ) )
_lowerCamelCase : Dict = component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
def A_ ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def A_ ( self , lowercase ):
_lowerCamelCase : Tuple = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
_lowerCamelCase : Any = value
def A_ ( self , *lowercase , **lowercase ):
_lowerCamelCase : Optional[int] = kwargs.get('is_split_into_words' , _a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*_a , **_a )
def A_ ( self , *lowercase , **lowercase ):
_lowerCamelCase : int = kwargs.get('is_split_into_words' , _a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*_a , **_a )
def A_ ( self , lowercase , lowercase = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def A_ ( self , lowercase , lowercase=None ):
_lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A_ ( self , lowercase , lowercase = None ):
_lowerCamelCase : List[str] = [self.sep_token_id]
_lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 361 |
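build_inputs_with_special_tokens above wraps sequences BART-style: <s> A </s> for a single sequence, <s> A </s> </s> B </s> for a pair. A short sketch of that layout, assuming the released RUCAIBox/mvp checkpoint and the MvpTokenizerFast export:

# Hedged sketch of the special-token layout produced by the methods above.
from transformers import MvpTokenizerFast

tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
ids = tokenizer("hello world")["input_ids"]
print(ids[0] == tokenizer.bos_token_id, ids[-1] == tokenizer.eos_token_id)  # True True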
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def _snake_case ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ):
if attention_mask is None:
_lowerCamelCase : List[str] = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class lowerCAmelCase__ :
'''simple docstring'''
lowerCamelCase__ = OPTConfig
lowerCamelCase__ = {}
lowerCamelCase__ = """gelu"""
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=16 , lowercase=2 , lowercase=4 , lowercase=4 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , lowercase=16 , lowercase=16 , ):
_lowerCamelCase : Tuple = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : str = is_training
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : List[Any] = eos_token_id
_lowerCamelCase : Tuple = pad_token_id
_lowerCamelCase : List[str] = bos_token_id
_lowerCamelCase : Optional[int] = embed_dim
_lowerCamelCase : List[str] = word_embed_proj_dim
_lowerCamelCase : Any = False
def A_ ( self ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowerCamelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCamelCase : str = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowerCamelCase : Tuple = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase , **self.config_updates , )
_lowerCamelCase : int = prepare_opt_inputs_dict(lowercase , lowercase )
return config, inputs_dict
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : Optional[Any] = TFOPTModel(config=lowercase )
_lowerCamelCase : Optional[Any] = inputs_dict['input_ids']
_lowerCamelCase : str = input_ids[:1, :]
_lowerCamelCase : Dict = inputs_dict['attention_mask'][:1, :]
_lowerCamelCase : Optional[Any] = 1
# first forward pass
_lowerCamelCase : Any = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
_lowerCamelCase, _lowerCamelCase : List[str] = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
_lowerCamelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
_lowerCamelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
_lowerCamelCase : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase )[0]
_lowerCamelCase : List[str] = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowerCamelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowerCamelCase : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
_lowerCamelCase : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
@require_tf
class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowerCamelCase__ = (TFOPTForCausalLM,) if is_tf_available() else ()
lowerCamelCase__ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = 10
def A_ ( self ):
_lowerCamelCase : int = TFOPTModelTester(self )
_lowerCamelCase : Tuple = ConfigTester(self , config_class=lowercase )
def A_ ( self ):
self.config_tester.run_common_tests()
def A_ ( self ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowercase , lowercase ):
if hasattr(lowercase , 'weight' ):
return embedding_layer.weight
else:
                # Here we build the word embedding weights if they do not exist,
                # and then retry fetching the attribute once built.
model.build()
if hasattr(lowercase , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_lowerCamelCase : Optional[int] = model_class(config=lowercase )
_lowerCamelCase : int = _get_word_embedding_weight(lowercase , model.get_input_embeddings() )
_lowerCamelCase : Tuple = _get_word_embedding_weight(lowercase , model.get_output_embeddings() )
                # resize the embeddings
model.resize_token_embeddings(lowercase )
_lowerCamelCase : str = _get_word_embedding_weight(lowercase , model.get_input_embeddings() )
_lowerCamelCase : Any = _get_word_embedding_weight(lowercase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_lowerCamelCase : Union[str, Any] = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowercase )
# check that weights remain the same after resizing
_lowerCamelCase : int = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_lowerCamelCase : Optional[Any] = False
self.assertTrue(lowercase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowercase )
_lowerCamelCase : Dict = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_lowerCamelCase : Union[str, Any] = False
self.assertTrue(lowercase )
def _snake_case ( lowercase__ ):
return tf.constant(lowercase__ , dtype=tf.intaa )
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = 99
def A_ ( self ):
_lowerCamelCase : Tuple = tf.ones((4, 1) , dtype=tf.intaa ) * 2
_lowerCamelCase : Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_lowerCamelCase : int = input_ids.shape[0]
_lowerCamelCase : List[Any] = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self ):
_lowerCamelCase : Tuple = TFOPTModel.from_pretrained('facebook/opt-350m' )
_lowerCamelCase : List[Any] = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_lowerCamelCase : List[str] = tf.not_equal(lowercase , model.config.pad_token_id )
with tf.GradientTape():
_lowerCamelCase : List[str] = model(input_ids=lowercase , attention_mask=lowercase ).last_hidden_state
_lowerCamelCase : Optional[Any] = (1, 11, 512)
self.assertEqual(output.shape , lowercase )
_lowerCamelCase : List[str] = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-3 ) )
_lowerCamelCase : List[str] = tf.function(lowercase , jit_compile=lowercase )
_lowerCamelCase : Union[str, Any] = xla_generate(lowercase , lowercase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-2 ) )
@require_tf
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
super().setUp()
_lowerCamelCase : List[Any] = 'facebook/opt-350m'
def A_ ( self ):
_lowerCamelCase : int = TFOPTForCausalLM.from_pretrained(self.path_model )
_lowerCamelCase : List[Any] = GPTaTokenizer.from_pretrained(self.path_model )
_lowerCamelCase : List[str] = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' , padding=lowercase , add_special_tokens=lowercase )
_lowerCamelCase : Optional[int] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_lowerCamelCase : Any = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) )
_lowerCamelCase : Tuple = tf.function(lowercase , jit_compile=lowercase )
_lowerCamelCase : List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) )
@require_tf
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def A_ ( self ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def A_ ( self ):
_lowerCamelCase : str = 'facebook/opt-125m'
_lowerCamelCase : Dict = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(lowercase )
_lowerCamelCase : Dict = TFOPTForCausalLM.from_pretrained(lowercase )
for prompt in self.prompts:
_lowerCamelCase : int = tokenizer(lowercase , return_tensors='tf' ).input_ids
_lowerCamelCase : int = model.generate(lowercase , max_length=10 )
_lowerCamelCase : Any = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
predicted_outputs += generated_string
self.assertListEqual(lowercase , lowercase )
def A_ ( self ):
_lowerCamelCase : List[Any] = 'facebook/opt-350m'
_lowerCamelCase : int = GPTaTokenizer.from_pretrained(lowercase )
_lowerCamelCase : Optional[int] = TFOPTForCausalLM.from_pretrained(lowercase )
_lowerCamelCase : Any = 'left'
# use different length sentences to test batching
_lowerCamelCase : Optional[int] = [
'Hello, my dog is a little',
'Today, I',
]
_lowerCamelCase : Dict = tokenizer(lowercase , return_tensors='tf' , padding=lowercase )
_lowerCamelCase : int = inputs['input_ids']
_lowerCamelCase : Tuple = model.generate(input_ids=lowercase , attention_mask=inputs['attention_mask'] )
_lowerCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
_lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase )
_lowerCamelCase : Dict = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
_lowerCamelCase : int = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
_lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings )
_lowerCamelCase : List[Any] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
_lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase )
_lowerCamelCase : Optional[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase )
_lowerCamelCase : Optional[Any] = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] )
def A_ ( self ):
_lowerCamelCase : Tuple = 'facebook/opt-350m'
_lowerCamelCase : List[Any] = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Optional[Any] = GPTaTokenizer.from_pretrained(lowercase )
_lowerCamelCase : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase )
for prompt in self.prompts:
_lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' ).input_ids
_lowerCamelCase : Optional[Any] = model.generate(lowercase , max_length=10 )
_lowerCamelCase : Dict = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
predicted_outputs += generated_string
self.assertListEqual(lowercase , lowercase ) | 12 | 0 |
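The batched-generation test above depends on left padding; a minimal sketch of the same setup with a public checkpoint (facebook/opt-125m is the smaller sibling of the test's opt-350m, used here only to keep the sketch light):

# Hedged sketch: OPT reuses the GPT-2 tokenizer, and batched decoding needs
# padding_side="left" so each prompt ends flush against its generated tokens.
from transformers import GPT2Tokenizer, TFOPTForCausalLM

tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-125m")
tokenizer.padding_side = "left"
model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")
inputs = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="tf", padding=True)
outputs = model.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))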
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase__ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ['ConvNextFeatureExtractor']
lowercase__ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure) | 362 |
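The speech-encoder-decoder, Mega, and ConvNext __init__ files above all follow the same _LazyModule pattern: the import table is declared eagerly, while the heavy torch/TF modules load only on first attribute access. A tiny sketch of the effect, using the ConvNext names from this row:

# Hedged sketch: importing the package itself stays cheap; the torch-backed
# class is materialized only when the attribute is first touched.
import transformers.models.convnext as convnext

model_cls = convnext.ConvNextModel  # modeling code is imported here, not above
print(model_cls.__name__)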
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """philschmid/bart-large-cnn-samsum"""
lowerCamelCase__ = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
lowerCamelCase__ = """summarizer"""
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSeqaSeqLM
lowerCamelCase__ = ["""text"""]
lowerCamelCase__ = ["""text"""]
    def encode( self , text ):
        return self.pre_processor(text , return_tensors='pt' , truncation=True )

    def forward( self , inputs ):
        return self.model.generate(**inputs )[0]

    def decode( self , outputs ):
        return self.pre_processor.decode(outputs , skip_special_tokens=True , clean_up_tokenization_spaces=True ) | 12 | 0 |
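A hedged usage sketch: `PipelineTool.__call__` chains encode -> forward -> decode, so the tool class defined above can be driven end to end like this (the input text is a placeholder):

summarizer = lowerCAmelCase__()
print(summarizer('Long meeting transcript goes here ...'))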
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.9_9_9 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t ):
            return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t ):
            return math.exp(t * -1_2.0 )

    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class lowerCAmelCase__ ( SchedulerMixin, ConfigMixin ):
'''simple docstring'''
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__( self , num_train_timesteps = 1000 , beta_start = 0.0_00_85 , beta_end = 0.0_12 , beta_schedule = "linear" , trained_betas = None , prediction_type = "epsilon" , timestep_spacing = "linspace" , steps_offset = 0 , ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )

        # set all values
        self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
    def index_for_timestep( self , timestep , schedule_timesteps=None ):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma( self ):
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input( self , sample , timestep , ):
        step_index = self.index_for_timestep(timestep )

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps( self , num_inference_steps , device = None , num_train_timesteps = None , ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=float )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(float )
            timesteps -= 1
        else:
            raise ValueError(
                F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        self.log_sigmas = torch.from_numpy(np.log(sigmas ) ).to(device )

        sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )

        if str(device ).startswith('mps' ):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps ).to(device , dtype=torch.float32 )
        else:
            timesteps = torch.from_numpy(timesteps ).to(device )

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol ).to(device , dtype=timesteps.dtype )
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps] )

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
    def sigma_to_t( self , sigma ):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0 , 1 )

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape )
        return t
@property
    def state_in_first_order( self ):
return self.sample is None
    def step( self , model_output , timestep , sample , return_dict = True , ):
        step_index = self.index_for_timestep(timestep )

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError('prediction_type not implemented yet: sample' )
        else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self , original_samples , noise , timesteps , ):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )

        step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps | 363 |
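A hedged sketch of driving the scheduler above; `fake_model` stands in for the UNet noise predictor a real pipeline would use:

import torch

scheduler = lowerCAmelCase__()
scheduler.set_timesteps(8)

sample = torch.randn(1, 4, 8, 8) * scheduler.init_noise_sigma
fake_model = lambda x, t: torch.zeros_like(x)  # placeholder noise predictor

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = fake_model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample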
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack( value , weight , capacity ):
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )

    max_value : float = 0
    fractions : list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod() | 12 | 0 |
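A quick worked example with hypothetical numbers: item 0 has the best value/weight ratio, so it is taken whole and the remaining capacity is filled fractionally:

value, weight = [60, 100, 120], [10, 20, 30]
max_value, fractions = fractional_knapsack(value, weight, 12)
print(max_value)  # 70.0: all of item 0 plus 1/10 of item 1
print(fractions)  # [1, 0.1, 0]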
"""simple docstring"""
def pancake_sort( arr ):
    cur = len(arr )
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
| 364 |
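A quick usage check; each pass flips the largest remaining element to the front and then into its final slot:

print(pancake_sort([3, 1, 5, 2, 4]))  # -> [1, 2, 3, 4, 5]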
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
payload = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
"""emoji""": True,
},
}
]
total_num_failed = 0
for log in Path().glob("""*.log"""):
    section_num_failed = 0
    with open(log, """r""") as f:
        for line in f:
            line = json.loads(line)
            if line.get("""nodeid""", """""") != "":
                test = line["""nodeid"""]
                if line.get("""duration""", None) is not None:
                    duration = F"{line['duration']:.4f}"
                    if line.get("""outcome""", """""") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("""_""")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
lowercase__ = """"""
lowercase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowercase__ = []
lowercase__ = {}
for test in failed_tests:
lowercase__ = test[0].split("""::""")
lowercase__ = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowercase__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowercase__ = [test[0] for test in failed_table]
lowercase__ = list(set(files))
# Count number of instances in failed_tests
lowercase__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowercase__ = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowercase__ = """Too many failed tests, please see the full report in the Action results."""
lowercase__ = len(err) + 10
lowercase__ = message[: 3000 - offset] + F"\n...\n```\n{err}"
print(F"### {message}")
else:
lowercase__ = """No failed tests! 🤗"""
print(F"## {message}")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
    if message != "No failed tests! 🤗":
        md_report = {
            """type""": """section""",
            """text""": {
                """type""": """mrkdwn""",
                """text""": message,
            },
        }
        payload.append(md_report)
        action_button = {
            """type""": """section""",
            """text""": {
                """type""": """mrkdwn""",
                """text""": """*For more details:*""",
            },
            """accessory""": {
                """type""": """button""",
                """text""": {
                    """type""": """plain_text""",
                    """text""": """Check Action results""",
                    """emoji""": True,
                },
                """url""": F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            """type""": """context""",
            """elements""": [
                {
                    """type""": """plain_text""",
                    """text""": F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
        response = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
        ts = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowercase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowercase__ = row[0]
else:
lowercase__ = """"""
lowercase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
) | 12 | 0 |
"""simple docstring"""
def mf_knapsack( i , wt , val , j ):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1 , wt , val , j )
        else:
            val = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val
    return f[i][j]


def knapsack( w , wt , val , n ):
    dp = [[0] * (w + 1) for _ in range(n + 1 )]

    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution( w , wt , val ):
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples' )

    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            f'''But got {num_items} weights and {len(val )} values'''
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                'All weights must be integers but got weight of '
                f'''type {type(wt[i] )} at index {i}'''
            )
            raise TypeError(msg )

    optimal_val, dp_table = knapsack(w , wt , val , num_items )
    example_optional_set : set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )

    return optimal_val, example_optional_set


def _construct_solution( dp , wt , i , j , optimal_set ):
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("""optimal_value = """, optimal_solution)
    print("""An optimal subset corresponding to the optimal value""", optimal_subset) | 365 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( ProcessorMixin ):
    '''simple docstring'''

    tokenizer_class = """AutoTokenizer"""
    attributes = ["""tokenizer"""]

    preset_shape = {
        """semantic_prompt""": 1,
        """coarse_prompt""": 2,
        """fine_prompt""": 2,
    }

    def __init__( self , tokenizer , speaker_embeddings=None ):
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
@classmethod
    def from_pretrained( cls , pretrained_processor_name_or_path , speaker_embeddings_dict_path="speaker_embeddings_path.json" , **kwargs ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path , speaker_embeddings_dict_path , subfolder=kwargs.pop('subfolder' , None ) , cache_dir=kwargs.pop('cache_dir' , None ) , force_download=kwargs.pop('force_download' , False ) , proxies=kwargs.pop('proxies' , None ) , resume_download=kwargs.pop('resume_download' , False ) , local_files_only=kwargs.pop('local_files_only' , False ) , use_auth_token=kwargs.pop('use_auth_token' , None ) , revision=kwargs.pop('revision' , None ) , )
            if speaker_embeddings_path is None:
                logger.warning(
                    F'''`{os.path.join(pretrained_processor_name_or_path , speaker_embeddings_dict_path )}` does not exist
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path ) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json )
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path , **kwargs )

        return cls(tokenizer=tokenizer , speaker_embeddings=speaker_embeddings )
def A_ ( self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , 'v2' ) , exist_ok=lowercase )
_lowerCamelCase : int = {}
_lowerCamelCase : List[Any] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_lowerCamelCase : Optional[Any] = self._load_voice_preset(lowercase )
_lowerCamelCase : Any = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , lowercase , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=lowercase , )
_lowerCamelCase : List[str] = os.path.join(lowercase , F'''{prompt_key}_{key}.npy''' )
_lowerCamelCase : Optional[Any] = tmp_dict
with open(os.path.join(lowercase , lowercase ) , 'w' ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
    def _load_voice_preset( self , voice_preset = None , **kwargs ):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )

            path = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , None ) , cache_dir=kwargs.pop('cache_dir' , None ) , force_download=kwargs.pop('force_download' , False ) , proxies=kwargs.pop('proxies' , None ) , resume_download=kwargs.pop('resume_download' , False ) , local_files_only=kwargs.pop('local_files_only' , False ) , use_auth_token=kwargs.pop('use_auth_token' , None ) , revision=kwargs.pop('revision' , None ) , )
            if path is None:
                raise ValueError(
                    F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )

            voice_preset_dict[key] = np.load(path )

        return voice_preset_dict
    def _validate_voice_preset_dict( self , voice_preset = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
    def __call__( self , text=None , voice_preset=None , return_tensors="pt" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , **kwargs , ):
        if voice_preset is not None and not isinstance(voice_preset , dict ):
            if (
                isinstance(voice_preset , str )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset )

            else:
                if isinstance(voice_preset , str ) and not voice_preset.endswith('.npz' ):
                    voice_preset = voice_preset + '.npz'

                voice_preset = np.load(voice_preset )

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset , **kwargs )
            voice_preset = BatchFeature(data=voice_preset , tensor_type=return_tensors )

        encoded_text = self.tokenizer(
            text , return_tensors=return_tensors , padding='max_length' , max_length=max_length , return_attention_mask=return_attention_mask , return_token_type_ids=return_token_type_ids , add_special_tokens=add_special_tokens , **kwargs , )

        if voice_preset is not None:
            encoded_text['history_prompt'] = voice_preset

        return encoded_text | 12 | 0 |
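A hedged usage sketch for the processor above; 'suno/bark-small' and the voice-preset id are assumed values from the Bark family:

processor = lowerCAmelCase__.from_pretrained('suno/bark-small')
inputs = processor('Hello, my dog is cute', voice_preset='v2/en_speaker_6')
print(inputs['input_ids'].shape)        # padded to max_length=256
print(inputs['history_prompt'].keys())  # semantic/coarse/fine prompt arrays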
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( TokenizerTesterMixin, unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()

        vocab = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )

        merges = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
        self.special_tokens_map = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = 'adapt act apte'
        output_text = 'adapt act apte'
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'adapt act apte'
        bpe_tokens = ['adapt', 'act', 'ap@@', 'te']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_special_tokens_small_tok( self ):
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
        assert tok('sam' ).input_ids == [1384]
        src_text = 'I am a small frog.'
        encoded = tok([src_text] , padding=False , truncation=True )['input_ids']
        decoded = tok.batch_decode(encoded , skip_special_tokens=True , clean_up_tokenization_spaces=True )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok( self ):
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
        src_text = 'I am a small frog .'
        src_text_dot = '.'
        encoded = tok(src_text )['input_ids']
        encoded_dot = tok(src_text_dot )['input_ids']
        assert encoded[-1] == encoded_dot[0] | 366 |
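A small sketch of the '@@' continuation convention the toy vocab above encodes; the file paths are placeholders for the vocab/merges written in setUp:

tok = BlenderbotSmallTokenizer('vocab.json', 'merges.txt', unk_token='__unk__')
print(tok.tokenize('adapt act apte'))  # -> ['adapt', 'act', 'ap@@', 'te']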
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowercase__ = False
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_image_variations( self ):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        image_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 12 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 25_6047
RO_CODE = 25_6145
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( TokenizerTesterMixin, unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp( self ):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self ):
_lowerCamelCase : str = NllbTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
_lowerCamelCase : Tuple = tokenizer.tokenize('This is a test' )
self.assertListEqual(__lowerCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCamelCase : Union[str, Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_lowerCamelCase : str = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def A_ ( self ):
        self.tokenizers_list = [(self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})]
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
_lowerCamelCase : Optional[int] = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
_lowerCamelCase : Any = tempfile.mkdtemp()
_lowerCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(__lowerCamelCase )
_lowerCamelCase : Any = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_lowerCamelCase : List[Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
_lowerCamelCase : Dict = tokenizer_r.from_pretrained(__lowerCamelCase )
_lowerCamelCase : List[Any] = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=True
_lowerCamelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCamelCase : Optional[int] = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
_lowerCamelCase : Dict = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
_lowerCamelCase : List[str] = tokenizer_r.from_pretrained(__lowerCamelCase )
_lowerCamelCase : Dict = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=False
_lowerCamelCase : Tuple = tempfile.mkdtemp()
_lowerCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
_lowerCamelCase : List[str] = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCamelCase : Optional[int] = tokenizer_r.from_pretrained(__lowerCamelCase )
_lowerCamelCase : int = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
@require_torch
def A_ ( self ):
if not self.test_seqaseq:
return
_lowerCamelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Longer text that will definitely require truncation.
_lowerCamelCase : Optional[Any] = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
_lowerCamelCase : Union[str, Any] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
_lowerCamelCase : Any = tokenizer.prepare_seqaseq_batch(
src_texts=__lowerCamelCase , tgt_texts=__lowerCamelCase , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
_lowerCamelCase : List[str] = tokenizer.prepare_seqaseq_batch(
__lowerCamelCase , tgt_texts=__lowerCamelCase , max_length=3 , return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
_lowerCamelCase : Tuple = tokenizer.prepare_seqaseq_batch(
src_texts=__lowerCamelCase , max_length=3 , max_target_length=10 , return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('decoder_input_ids' , __lowerCamelCase )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def A_ ( self ):
pass
def A_ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCamelCase : List[str] = [AddedToken('<special>' , lstrip=__lowerCamelCase )]
_lowerCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase )
_lowerCamelCase : Optional[int] = tokenizer_r.encode('Hey this is a <special> token' )
_lowerCamelCase : List[Any] = tokenizer_r.encode('<special>' , add_special_tokens=__lowerCamelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
_lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
_lowerCamelCase : Dict = self.tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase )
_lowerCamelCase : Union[str, Any] = tokenizer_p.encode('Hey this is a <special> token' )
_lowerCamelCase : List[str] = tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = """facebook/nllb-200-distilled-600M"""
    src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
    tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
    expected_src_tokens = [
25_60_47,
1_62_97,
13_44_08,
81_65,
24_80_66,
1_47_34,
9_50,
11_35,
10_57_21,
35_73,
83,
2_73_52,
1_08,
4_94_86,
2,
]
@classmethod
    def setUpClass( cls ):
        cls.tokenizer : NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' )
        cls.pad_token_id = 1
return cls
def A_ ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 256001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 256002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 256057 )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase )
def A_ ( self ):
self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids )
# fmt: off
_lowerCamelCase : Optional[int] = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
# fmt: on
_lowerCamelCase : List[Any] = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
_lowerCamelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase )
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , __lowerCamelCase )
_lowerCamelCase : int = 10
_lowerCamelCase : List[Any] = self.tokenizer(__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
def A_ ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [256203, 3] )
def A_ ( self ):
_lowerCamelCase : List[str] = tempfile.mkdtemp()
_lowerCamelCase : Tuple = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCamelCase )
_lowerCamelCase : Dict = NllbTokenizer.from_pretrained(__lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCamelCase )
@require_torch
def A_ ( self ):
_lowerCamelCase : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
_lowerCamelCase : Tuple = shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
_lowerCamelCase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def A_ ( self ):
_lowerCamelCase : Dict = self.tokenizer(self.src_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=3 , return_tensors='pt' )
_lowerCamelCase : int = self.tokenizer(
text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=10 , return_tensors='pt' )
_lowerCamelCase : str = targets["""input_ids"""]
_lowerCamelCase : Dict = shift_tokens_right(
__lowerCamelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def A_ ( self ):
_lowerCamelCase : Optional[int] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , {
# A, test, EOS, en_XX
'input_ids': [[256047, 70, 7356, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 256057,
} , )
@require_torch
def A_ ( self ):
_lowerCamelCase : List[Any] = True
_lowerCamelCase : Tuple = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] )
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
| 367 |
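A hedged usage sketch of the API the tests above exercise; NLLB language pairs are given as FLORES-200 codes:

tok = NllbTokenizer.from_pretrained('facebook/nllb-200-distilled-600M', src_lang='eng_Latn', tgt_lang='ron_Latn')
batch = tok('UN Chief says there is no military solution in Syria', return_tensors='pt')
print(batch.input_ids)  # token ids plus </s> and the language code; the code's position depends on the legacy flag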
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
lowercase__ = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count( message ):
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero( x ):
    return x[0]


def get_frequency_order( message ):
    letter_to_freq = get_letter_count(message )
    freq_to_letter : dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )

    freq_to_letter_str : dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq] )

    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )

    freq_order : list[str] = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order )


def english_freq_match_score( message ):
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod() | 12 | 0 |
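A quick worked example, assuming ordinary English prose as input:

sample = 'The quick brown fox jumps over the lazy dog'
print(get_frequency_order(sample))       # 26-letter string, most frequent letters first
print(english_freq_match_score(sample))  # integer score out of a possible 12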
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['torch', 'scipy']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'scipy'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'scipy'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'scipy'] ) | 368 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ :
'''simple docstring'''
    def __init__( self , question_encoder , generator ):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained( self , save_directory ):
        if os.path.isfile(save_directory ):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , 'question_encoder_tokenizer' )
        generator_path = os.path.join(save_directory , 'generator_tokenizer' )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop('config' , None )
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path )

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder='generator_tokenizer' )
        return cls(question_encoder=question_encoder , generator=generator )
    def __call__( self , *args , **kwargs ):
        return self.current_tokenizer(*args , **kwargs )

    def batch_decode( self , *args , **kwargs ):
        return self.generator.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.generator.decode(*args , **kwargs )
    def _switch_to_input_mode( self ):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode( self ):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch( self , src_texts , tgt_texts = None , max_length = None , max_target_length = None , padding = "longest" , return_tensors = None , truncation = True , **kwargs , ):
        warnings.warn(
            '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
            'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
            'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
            'details' , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs['labels'] = labels['input_ids']
        return model_inputs | 12 | 0 |
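A hedged usage sketch; 'facebook/rag-token-nq' is a public RAG checkpoint whose sub-tokenizers live in the two subfolders used above:

rag_tok = lowerCAmelCase__.from_pretrained('facebook/rag-token-nq')
batch = rag_tok(['who holds the record in 100m freestyle'], return_tensors='pt')
print(batch.input_ids.shape)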
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian( matrix ):
    return np.array_equal(matrix , matrix.conjugate().T )


def rayleigh_quotient( a , v ):
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))


def tests():
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f'''{a} is not hermitian.'''
    print(rayleigh_quotient(a , v ) )

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f'''{a} is not hermitian.'''
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests() | 369 |
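A small extra check: for a Hermitian matrix, the Rayleigh quotient of an eigenvector recovers the corresponding eigenvalue:

m = np.array([[2, 0], [0, 5]])
vec = np.array([[0], [1]])
assert is_hermitian(m)
print(rayleigh_quotient(m, vec))  # -> [[5.]]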
"""simple docstring"""
def solution( n = 10 ):
    if not isinstance(n , int ) or n < 0:
        raise ValueError('Invalid input' )
    modulus = 10**n
    number = 28433 * (pow(2 , 7830457 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(10) = }") | 12 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=32 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=True )
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config )
        tokenizer_2 = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=True )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''text_encoder_2''': text_encoder_2,
            '''tokenizer_2''': tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
def A_ ( self , lowercase , lowercase=0 ):
_lowerCamelCase : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[int] = image / 2 + 0.5
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
_lowerCamelCase : Any = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
_lowerCamelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
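# strength 0.75 noises the input image to roughly 75% of the schedule, so only that portion is denoised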
}
return inputs
def A_ ( self ):
_lowerCamelCase : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : List[str] = self.get_dummy_components()
_lowerCamelCase : List[Any] = StableDiffusionXLImgaImgPipeline(**_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Tuple = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : str = sd_pipe(**_SCREAMING_SNAKE_CASE ).images
_lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCamelCase : Optional[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A_ ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def A_ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def A_ ( self ):
pass
def A_ ( self ):
_lowerCamelCase : Optional[int] = self.get_dummy_components()
_lowerCamelCase : Optional[Any] = StableDiffusionXLImgaImgPipeline(**_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[int] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
# forward without prompt embeds
_lowerCamelCase : Optional[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Union[str, Any] = 3 * ['''this is a negative prompt''']
_lowerCamelCase : List[str] = negative_prompt
_lowerCamelCase : Dict = 3 * [inputs['''prompt''']]
_lowerCamelCase : str = sd_pipe(**_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Any = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_lowerCamelCase : Any = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : List[Any] = 3 * ['''this is a negative prompt''']
_lowerCamelCase : Tuple = 3 * [inputs.pop('prompt' )]
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = sd_pipe.encode_prompt(_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : str = sd_pipe(
**_SCREAMING_SNAKE_CASE , prompt_embeds=_SCREAMING_SNAKE_CASE , negative_prompt_embeds=_SCREAMING_SNAKE_CASE , pooled_prompt_embeds=_SCREAMING_SNAKE_CASE , negative_pooled_prompt_embeds=_SCREAMING_SNAKE_CASE , )
_lowerCamelCase : Dict = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self , lowercase , lowercase="cpu" , lowercase=torch.floataa , lowercase=0 ):
_lowerCamelCase : Optional[int] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : str = np.random.RandomState(_SCREAMING_SNAKE_CASE ).standard_normal((1, 4, 64, 64) )
_lowerCamelCase : Optional[int] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Any = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def A_ ( self ):
_lowerCamelCase : int = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[Any] = self.get_inputs(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[Any] = pipe(**_SCREAMING_SNAKE_CASE ).images
_lowerCamelCase : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_lowerCamelCase : Union[str, Any] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3 | 370 |
"""simple docstring"""
import argparse
import datetime
def _snake_case ( lowercase__ ):
_lowerCamelCase : Dict = {
'0': 'Sunday',
'1': 'Monday',
'2': 'Tuesday',
'3': 'Wednesday',
'4': 'Thursday',
'5': 'Friday',
'6': 'Saturday',
}
_lowerCamelCase : str = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
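# maps datetime.date.weekday() (Monday == 0) onto this function's numbering (Sunday == 0),
# used further down to cross-check the hand-computed result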
# Validate
if len(lowercase__ ) != 10:
raise ValueError('Must be 10 characters long' )
# Get month
_lowerCamelCase : int = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError('Month must be between 1 - 12' )
_lowerCamelCase : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get day
_lowerCamelCase : int = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError('Day must be between 1 - 31' )
# Get second separator
_lowerCamelCase : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get year
_lowerCamelCase : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
'Year out of range. There has to be some sort of limit...right?' )
# Get datetime obj for validation
_lowerCamelCase : str = datetime.date(int(lowercase__ ) , int(lowercase__ ) , int(lowercase__ ) )
# Start math
if m <= 2:
_lowerCamelCase : str = y - 1
_lowerCamelCase : Tuple = m + 12
# Zeller's congruence variables: the century (first two digits of the year) and the year of the century (last two digits)
_lowerCamelCase : int = int(str(lowercase__ )[:2] )
_lowerCamelCase : int = int(str(lowercase__ )[2:] )
_lowerCamelCase : int = int(2.6 * m - 5.3_9 )
_lowerCamelCase : int = int(c / 4 )
_lowerCamelCase : int = int(k / 4 )
_lowerCamelCase : int = int(d + k )
_lowerCamelCase : int = int(t + u + v + x )
_lowerCamelCase : int = int(z - (2 * c) )
_lowerCamelCase : int = round(w % 7 )
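# worked example for 01-31-2010: m=13, y=2009 gives int(2.6*13 - 5.39)=28, int(20/4)=5,
# int(9/4)=2 and 31+9=40, so (28+5+2+40) - 2*20 = 35 and 35 % 7 == 0 -> Sunday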
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
# Response
_lowerCamelCase : str = f'''Your date {date_input}, is a {days[str(lowercase__ )]}!'''
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
lowercase__ = parser.parse_args()
zeller(args.date_input) | 12 | 0 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class lowerCAmelCase__ ( list ):
'''simple docstring'''
def __lt__( self , lowercase ):
return self[-1] < other[-1]
def __eq__( self , lowercase ):
return self[-1] == other[-1]
def _snake_case ( lowercase__ ):
_lowerCamelCase : str = []
# sort into stacks
for element in collection:
_lowerCamelCase : str = Stack([element] )
_lowerCamelCase : Optional[int] = bisect_left(lowerCamelCase__ , lowerCamelCase__ )
if i != len(lowerCamelCase__ ):
stacks[i].append(lowerCamelCase__ )
else:
stacks.append(lowerCamelCase__ )
# use a heap-based merge to merge stack efficiently
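# each stack keeps its elements in decreasing order, so reversing one yields an ascending run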
collection[:] = merge(*(reversed(stack ) for stack in stacks) )
return collection
if __name__ == "__main__":
lowercase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase__ = [int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted)) | 371 |
"""simple docstring"""
import re
def _snake_case ( lowercase__ ):
_lowerCamelCase : Optional[int] = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' )
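# pattern breakdown: optional '+91' country code (with optional '-' or space),
# optional '0' or '91' trunk prefix, then ten digits starting with 7, 8 or 9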
if match := re.search(lowercase__ , lowercase__ ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895""")) | 12 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = Dict[str, Any]
lowercase__ = List[Prediction]
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , *lowercase , **lowercase ):
super().__init__(*_snake_case , **_snake_case )
if self.framework == "tf":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , 'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def A_ ( self , **lowercase ):
_lowerCamelCase : List[str] = {}
if "threshold" in kwargs:
_lowerCamelCase : Any = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self , *lowercase , **lowercase ):
return super().__call__(*_snake_case , **_snake_case )
def A_ ( self , lowercase ):
_lowerCamelCase : str = load_image(_snake_case )
_lowerCamelCase : Union[str, Any] = torch.IntTensor([[image.height, image.width]] )
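# the (height, width) captured above lets postprocessing map predicted boxes back to pixel space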
_lowerCamelCase : Dict = self.image_processor(images=[image] , return_tensors='pt' )
if self.tokenizer is not None:
_lowerCamelCase : Any = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
_lowerCamelCase : Union[str, Any] = target_size
return inputs
def A_ ( self , lowercase ):
_lowerCamelCase : List[Any] = model_inputs.pop('target_size' )
_lowerCamelCase : Union[str, Any] = self.model(**_snake_case )
_lowerCamelCase : Optional[Any] = outputs.__class__({'target_size': target_size, **outputs} )
if self.tokenizer is not None:
_lowerCamelCase : Union[str, Any] = model_inputs["bbox"]
return model_outputs
def A_ ( self , lowercase , lowercase=0.9 ):
_lowerCamelCase : List[Any] = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_lowerCamelCase : Union[str, Any] = target_size[0].tolist()
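# LayoutLM-style boxes are normalized to a 0-1000 grid, hence the division by 1000 below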
def unnormalize(lowercase ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
_lowerCamelCase : int = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_lowerCamelCase : Dict = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_lowerCamelCase : Tuple = [unnormalize(_snake_case ) for bbox in model_outputs["bbox"].squeeze(0 )]
_lowerCamelCase : List[Any] = ["score", "label", "box"]
_lowerCamelCase : List[Any] = [dict(zip(_snake_case , _snake_case ) ) for vals in zip(scores.tolist() , _snake_case , _snake_case ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_lowerCamelCase : List[str] = self.image_processor.post_process_object_detection(_snake_case , _snake_case , _snake_case )
_lowerCamelCase : Union[str, Any] = raw_annotations[0]
_lowerCamelCase : Optional[Any] = raw_annotation["scores"]
_lowerCamelCase : Union[str, Any] = raw_annotation["labels"]
_lowerCamelCase : Dict = raw_annotation["boxes"]
_lowerCamelCase : str = scores.tolist()
_lowerCamelCase : List[Any] = [self.model.config.idalabel[label.item()] for label in labels]
_lowerCamelCase : List[str] = [self._get_bounding_box(_snake_case ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_lowerCamelCase : List[Any] = ["score", "label", "box"]
_lowerCamelCase : Any = [
dict(zip(_snake_case , _snake_case ) )
for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
]
return annotation
def A_ ( self , lowercase ):
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
_lowerCamelCase : Optional[Any] = box.int().tolist()
_lowerCamelCase : Optional[Any] = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox | 350 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowercase__ = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
lowercase__ = """zero2"""
lowercase__ = """zero3"""
lowercase__ = [ZEROa, ZEROa]
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
_lowerCamelCase : List[str] = parameterized.to_safe_name('_'.join(str(x ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
lowercase__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
def A_ ( self , lowercase ):
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = True , lowercase = True , lowercase = True , ):
_lowerCamelCase : List[str] = models[model]
_lowerCamelCase : Optional[int] = self.run_trainer(
stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
self.do_checks(lowercase )
return output_dir
def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = 1 , lowercase = True , lowercase = True , ):
_lowerCamelCase : List[str] = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
_lowerCamelCase : Any = F'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_lowerCamelCase : Optional[int] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_lowerCamelCase : Optional[Any] = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_lowerCamelCase : Dict = self.get_launcher(lowercase )
_lowerCamelCase : Union[str, Any] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase , env=self.get_env() )
return output_dir
def A_ ( self , lowercase=False ):
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
_lowerCamelCase : Any = min(2 , get_gpu_count() ) if distributed else 1
return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split() | 12 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def __init__( self , lowercase , lowercase ):
super().__init__()
# make sure scheduler can always be converted to DDIM
_lowerCamelCase : Dict = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowercase , scheduler=lowercase )
@torch.no_grad()
def __call__( self , lowercase = 1 , lowercase = None , lowercase = 0.0 , lowercase = 50 , lowercase = None , lowercase = "pil" , lowercase = True , ):
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , lowercase ):
_lowerCamelCase : str = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_lowerCamelCase : Dict = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(lowercase , lowercase ) and len(lowercase ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(lowercase )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
_lowerCamelCase : Tuple = randn_tensor(lowercase , generator=lowercase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_lowerCamelCase : List[Any] = self.unet(lowercase , lowercase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_lowerCamelCase : Optional[Any] = self.scheduler.step(
lowercase , lowercase , lowercase , eta=lowercase , use_clipped_model_output=lowercase , generator=lowercase ).prev_sample
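# rescale the denoised samples from the model's [-1, 1] range to [0, 1] for image conversion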
_lowerCamelCase : Dict = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCamelCase : Dict = self.numpy_to_pil(lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase ) | 351 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = ["""pixel_values"""]
def __init__( self , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = 8 , **lowercase , ):
super().__init__(**lowercase )
_lowerCamelCase : Optional[Any] = do_rescale
_lowerCamelCase : Union[str, Any] = rescale_factor
_lowerCamelCase : Any = do_pad
_lowerCamelCase : Optional[int] = pad_size
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase = None ):
_lowerCamelCase, _lowerCamelCase : Tuple = get_image_size(lowercase )
_lowerCamelCase : Union[str, Any] = (old_height // size + 1) * size - old_height
_lowerCamelCase : Tuple = (old_width // size + 1) * size - old_width
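# pad the bottom and right edges up to the next multiple of `size`;
# note a full extra block is added when a side is already aligned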
return pad(lowercase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=lowercase )
def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
_lowerCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Any = do_pad if do_pad is not None else self.do_pad
_lowerCamelCase : int = pad_size if pad_size is not None else self.pad_size
_lowerCamelCase : Dict = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
_lowerCamelCase : Dict = [to_numpy_array(lowercase ) for image in images]
if do_rescale:
_lowerCamelCase : str = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_pad:
_lowerCamelCase : str = [self.pad(lowercase , size=lowercase ) for image in images]
_lowerCamelCase : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
_lowerCamelCase : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 12 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 352 |
"""simple docstring"""
import os
import string
import sys
lowercase__ = 1 << 8
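# flag ORed onto arrow-key codes so they sit above the 8-bit character range
# and cannot collide with ordinary key codes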
lowercase__ = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
lowercase__ = KEYMAP["""up"""]
lowercase__ = KEYMAP["""left"""]
if sys.platform == "win32":
lowercase__ = []
lowercase__ = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
KEYMAP[str(i)] = ord(str(i))
def _snake_case ( ):
if os.name == "nt":
import msvcrt
_lowerCamelCase : Any = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(lowercase__ ) == 0:
# Read the keystroke
_lowerCamelCase : str = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
_lowerCamelCase : List[Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
_lowerCamelCase : Union[str, Any] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(lowercase__ )
if ord(lowercase__ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
_lowerCamelCase : List[Any] = chr(KEYMAP['esc'] )
except KeyError:
_lowerCamelCase : int = cha[1]
else:
_lowerCamelCase : Optional[int] = ch.decode(lowercase__ )
else:
_lowerCamelCase : Union[str, Any] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
_lowerCamelCase : List[str] = sys.stdin.fileno()
_lowerCamelCase : Tuple = termios.tcgetattr(lowercase__ )
try:
tty.setraw(lowercase__ )
_lowerCamelCase : Optional[Any] = sys.stdin.read(1 )
finally:
termios.tcsetattr(lowercase__ , termios.TCSADRAIN , lowercase__ )
return ch
def _snake_case ( ):
_lowerCamelCase : int = get_raw_chars()
if ord(lowercase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(lowercase__ ) == KEYMAP["esc"]:
_lowerCamelCase : Union[str, Any] = get_raw_chars()
if ord(lowercase__ ) == KEYMAP["mod_int"]:
_lowerCamelCase : List[Any] = get_raw_chars()
if ord(lowercase__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowercase__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(lowercase__ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"] | 12 | 0 |
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowercase__ = TypeVar("""T""")
class lowerCAmelCase__ ( Generic[T] ):
'''simple docstring'''
def __init__( self , lowercase = True ):
_lowerCamelCase : dict[T, list[T]] = {} # dictionary of lists
_lowerCamelCase : Dict = directed
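# e.g. for an undirected graph, adding edge (0, 1) yields the adjacency list {0: [1], 1: [0]}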
def A_ ( self , lowercase , lowercase ):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowercase )
self.adj_list[destination_vertex].append(lowercase )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowercase )
_lowerCamelCase : Any = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the destination vertex
# as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(lowercase )
_lowerCamelCase : List[Any] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as its first adjacent vertex; also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as its first adjacent vertex.
else:
_lowerCamelCase : Tuple = [destination_vertex]
_lowerCamelCase : List[Any] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowercase )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowercase )
_lowerCamelCase : Union[str, Any] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
_lowerCamelCase : Any = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
_lowerCamelCase : Dict = [destination_vertex]
_lowerCamelCase : List[str] = []
return self
def __repr__( self ):
return pformat(self.adj_list ) | 353 |
"""simple docstring"""
from typing import Any
def _snake_case ( lowercase__ ):
if not input_list:
return []
_lowerCamelCase : Any = [input_list.count(lowercase__ ) for value in input_list]
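# list.count inside a comprehension makes this O(n**2); acceptable for the small lists this helper targets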
_lowerCamelCase : Dict = max(lowercase__ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowercase__ ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod() | 12 | 0 |
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : List[Any] = logging.get_logger()
# the current default level is logging.WARNING
_lowerCamelCase : int = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(lowerCamelCase_ )
def A_ ( self ):
_lowerCamelCase : Optional[int] = logging.get_verbosity()
_lowerCamelCase : Dict = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowerCamelCase : Optional[Any] = 'Testing 1, 2, 3'
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(lowerCamelCase_ ) as cl:
logger.warning(lowerCamelCase_ )
self.assertEqual(cl.out , msg + '\n' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(lowerCamelCase_ ) as cl:
logger.warning(lowerCamelCase_ )
self.assertEqual(cl.out , '' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(lowerCamelCase_ ) as cl:
logger.warning(lowerCamelCase_ )
self.assertEqual(cl.out , msg + '\n' )
# restore to the original level
logging.set_verbosity(lowerCamelCase_ )
@mockenv(TRANSFORMERS_VERBOSITY='error' )
def A_ ( self ):
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
_lowerCamelCase : Tuple = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowerCamelCase : int = os.getenv('TRANSFORMERS_VERBOSITY' , lowerCamelCase_ )
_lowerCamelCase : List[Any] = logging.log_levels[env_level_str]
_lowerCamelCase : Optional[int] = logging.get_verbosity()
self.assertEqual(
lowerCamelCase_ , lowerCamelCase_ , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
_lowerCamelCase : Any = ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
def A_ ( self ):
transformers.utils.logging._reset_library_root_logger()
_lowerCamelCase : Dict = logging.logging.getLogger()
with CaptureLogger(lowerCamelCase_ ) as cl:
# this action activates the env var
logging.get_logger('transformers.models.bart.tokenization_bart' )
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
def A_ ( self ):
transformers.utils.logging._reset_library_root_logger()
_lowerCamelCase : List[str] = logging.get_logger('transformers.models.bart.tokenization_bart' )
_lowerCamelCase : Optional[int] = 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
# nothing should be logged as env var disables this method
with CaptureLogger(lowerCamelCase_ ) as cl:
logger.warning_advice(lowerCamelCase_ )
self.assertEqual(cl.out , '' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(lowerCamelCase_ ) as cl:
logger.warning_advice(lowerCamelCase_ )
self.assertEqual(cl.out , msg + '\n' )
def _snake_case ( ):
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled() | 354 |
"""simple docstring"""
def _snake_case ( lowercase__ ):
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
_lowerCamelCase : List[str] = len(lowercase__ )
_lowerCamelCase : List[str] = max(lowercase__ )
_lowerCamelCase : List[str] = min(lowercase__ )
# create the counting array
_lowerCamelCase : List[Any] = coll_max + 1 - coll_min
_lowerCamelCase : List[Any] = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with its predecessors. Now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1 , lowercase__ ):
_lowerCamelCase : Optional[int] = counting_arr[i] + counting_arr[i - 1]
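# e.g. counts [1, 2, 0, 1] become the prefix sums [1, 3, 3, 4]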
# create the output collection
_lowerCamelCase : Dict = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to beginning, updating counting_arr
for i in reversed(range(0 , lowercase__ ) ):
_lowerCamelCase : Any = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def _snake_case ( lowercase__ ):
return "".join([chr(lowercase__ ) for i in counting_sort([ord(lowercase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
lowercase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase__ = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted)) | 12 | 0 |
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
lowercase__ = 'bert-base-cased'
lowercase__ = 'google/pegasus-xsum'
lowercase__ = [' Sam ate lunch today.', 'Sams lunch ingredients.']
lowercase__ = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
lowercase__ = 'patrickvonplaten/t5-tiny-random'
lowercase__ = 'sshleifer/bart-tiny-random'
lowercase__ = 'sshleifer/tiny-mbart'
lowercase__ = 'sshleifer/tiny-marian-en-de'
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : Dict = '\n'.join(__UpperCAmelCase )
Path(__UpperCAmelCase ).open('w' ).writelines(__UpperCAmelCase )
def _snake_case ( lowercase__ ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(__UpperCAmelCase , f'''{split}.source''' ) , __UpperCAmelCase )
_dump_articles(os.path.join(__UpperCAmelCase , f'''{split}.target''' ) , __UpperCAmelCase )
return tmp_dir
class lowerCAmelCase__ ( _lowerCamelCase ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def A_ ( self , lowercase ):
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(lowercase_ )
_lowerCamelCase : Optional[int] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_lowerCamelCase : int = max(len(tokenizer.encode(lowercase_ ) ) for a in ARTICLES )
_lowerCamelCase : Tuple = max(len(tokenizer.encode(lowercase_ ) ) for a in SUMMARIES )
_lowerCamelCase : Optional[Any] = 4
_lowerCamelCase : Union[str, Any] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_lowerCamelCase, _lowerCamelCase : Any = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
_lowerCamelCase : List[str] = SeqaSeqDataset(
lowercase_ , data_dir=lowercase_ , type_path='train' , max_source_length=lowercase_ , max_target_length=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_ , )
_lowerCamelCase : List[str] = DataLoader(lowercase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(lowercase_ , lowercase_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_lowerCamelCase : Tuple = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def A_ ( self , lowercase ):
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained(lowercase_ )
_lowerCamelCase : str = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_lowerCamelCase : List[str] = max(len(tokenizer.encode(lowercase_ ) ) for a in ARTICLES )
_lowerCamelCase : Dict = max(len(tokenizer.encode(lowercase_ ) ) for a in SUMMARIES )
_lowerCamelCase : Dict = 4
_lowerCamelCase : Optional[Any] = LegacySeqaSeqDataset(
lowercase_ , data_dir=lowercase_ , type_path='train' , max_source_length=20 , max_target_length=lowercase_ , )
_lowerCamelCase : str = DataLoader(lowercase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def A_ ( self ):
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
_lowerCamelCase : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_lowerCamelCase : int = tmp_dir.joinpath('train.source' ).open().readlines()
_lowerCamelCase : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(lowercase_ , lowercase_ , 128 , lowercase_ )
_lowerCamelCase : Any = {x.name for x in tmp_dir.iterdir()}
_lowerCamelCase : int = {x.name for x in save_dir.iterdir()}
_lowerCamelCase : Any = save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(lowercase_ ) < len(lowercase_ )
assert len(lowercase_ ) == 1
assert len(packed_examples[0] ) == sum(len(lowercase_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def A_ ( self ):
if not FAIRSEQ_AVAILABLE:
return
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_dataset(max_len=64 )
_lowerCamelCase : Any = 64
_lowerCamelCase : Tuple = ds.make_dynamic_sampler(lowercase_ , required_batch_size_multiple=lowercase_ )
_lowerCamelCase : Dict = [len(lowercase_ ) for x in batch_sampler]
assert len(set(lowercase_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(lowercase_ ) == len(lowercase_ ) # no dropped or added examples
_lowerCamelCase : Optional[Any] = DataLoader(lowercase_ , batch_sampler=lowercase_ , collate_fn=ds.collate_fn , num_workers=2 )
_lowerCamelCase : Dict = []
_lowerCamelCase : Any = []
for batch in data_loader:
_lowerCamelCase : Dict = batch['input_ids'].shape
_lowerCamelCase : List[Any] = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_lowerCamelCase : Union[str, Any] = np.product(batch['input_ids'].shape )
num_src_per_batch.append(lowercase_ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(lowercase_ )
assert num_src_per_batch[0] == max(lowercase_ )
if failures:
raise AssertionError(F'''too many tokens in {len(lowercase_ )} batches''' )
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = self._get_dataset(max_len=512 )
_lowerCamelCase : Tuple = 2
_lowerCamelCase : Any = ds.make_sortish_sampler(lowercase_ , shuffle=lowercase_ )
_lowerCamelCase : List[Any] = DataLoader(lowercase_ , batch_size=lowercase_ , collate_fn=ds.collate_fn , num_workers=2 )
_lowerCamelCase : Any = DataLoader(lowercase_ , batch_size=lowercase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowercase_ )
_lowerCamelCase : Optional[int] = tokenizer.pad_token_id
def count_pad_tokens(lowercase , lowercase="input_ids" ):
return [batch[k].eq(lowercase_ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(lowercase_ , k='labels' ) ) < sum(count_pad_tokens(lowercase_ , k='labels' ) )
assert sum(count_pad_tokens(lowercase_ ) ) < sum(count_pad_tokens(lowercase_ ) )
assert len(lowercase_ ) == len(lowercase_ )
def A_ ( self , lowercase=1000 , lowercase=128 ):
if os.getenv('USE_REAL_DATA' , lowercase_ ):
_lowerCamelCase : str = 'examples/seq2seq/wmt_en_ro'
_lowerCamelCase : Dict = max_len * 2 * 64
if not Path(lowercase_ ).joinpath('train.len' ).exists():
save_len_file(lowercase_ , lowercase_ )
else:
_lowerCamelCase : List[Any] = 'examples/seq2seq/test_data/wmt_en_ro'
_lowerCamelCase : List[str] = max_len * 4
save_len_file(lowercase_ , lowercase_ )
_lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase_ )
_lowerCamelCase : Union[str, Any] = SeqaSeqDataset(
lowercase_ , data_dir=lowercase_ , type_path='train' , max_source_length=lowercase_ , max_target_length=lowercase_ , n_obs=lowercase_ , )
return ds, max_tokens, tokenizer
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_dataset()
_lowerCamelCase : Any = set(DistributedSortishSampler(lowercase_ , 256 , num_replicas=2 , rank=0 , add_extra_examples=lowercase_ ) )
_lowerCamelCase : List[Any] = set(DistributedSortishSampler(lowercase_ , 256 , num_replicas=2 , rank=1 , add_extra_examples=lowercase_ ) )
assert idsa.intersection(lowercase_ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def A_ ( self , lowercase ):
_lowerCamelCase : Any = AutoTokenizer.from_pretrained(lowercase_ , use_fast=lowercase_ )
if tok_name == MBART_TINY:
_lowerCamelCase : Tuple = SeqaSeqDataset(
lowercase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
_lowerCamelCase : str = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_lowerCamelCase : Optional[int] = SeqaSeqDataset(
lowercase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
_lowerCamelCase : List[Any] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(lowercase_ ) == 1 if tok_name == BART_TINY else len(lowercase_ ) == 0 | 355 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
lowercase__ = parser.parse_args()
lowercase__ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 12 | 0 |
"""simple docstring"""
import argparse
import os
import re
lowercase__ = '''src/transformers'''
# Pattern that looks at the indentation in a line.
lowercase__ = re.compile(R"""^(\s*)\S""")
# Pattern that matches `"key":` and puts `key` in group 0.
lowercase__ = re.compile(R"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ = re.compile(R"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ = re.compile(R"""\[([^\]]+)\]""")
def _snake_case ( lowercase__ ):
_lowerCamelCase : Dict = _re_indent.search(lowercase_ )
return "" if search is None else search.groups()[0]
def _snake_case ( lowercase__ , lowercase__="" , lowercase__=None , lowercase__=None ):
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(lowercase_ ):
index += 1
_lowerCamelCase : Union[str, Any] = ['\n'.join(lines[:index] )]
else:
_lowerCamelCase : List[Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCamelCase : List[str] = [lines[index]]
index += 1
while index < len(lowercase_ ) and (end_prompt is None or not lines[index].startswith(lowercase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowercase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(lowercase_ ) )
if index < len(lowercase_ ) - 1:
_lowerCamelCase : Optional[Any] = [lines[index + 1]]
index += 1
else:
_lowerCamelCase : Optional[int] = []
else:
blocks.append('\n'.join(lowercase_ ) )
_lowerCamelCase : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowercase_ ) > 0:
blocks.append('\n'.join(lowercase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowercase_ ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def _snake_case ( lowercase__ ):
def _inner(lowercase__ ):
return key(lowercase_ ).lower().replace('_' , '' )
return _inner
def _snake_case ( lowercase__ , lowercase__=None ):
# If no key is provided, we use a noop.
def noop(lowercase__ ):
return x
if key is None:
_lowerCamelCase : str = noop
# Constants are all uppercase, they go first.
_lowerCamelCase : Optional[Any] = [obj for obj in objects if key(lowercase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCamelCase : Any = [obj for obj in objects if key(lowercase_ )[0].isupper() and not key(lowercase_ ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCamelCase : List[str] = [obj for obj in objects if not key(lowercase_ )[0].isupper()]
_lowerCamelCase : Optional[int] = ignore_underscore(lowercase_ )
return sorted(lowercase_ , key=lowercase_ ) + sorted(lowercase_ , key=lowercase_ ) + sorted(lowercase_ , key=lowercase_ )
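# e.g. sort_objects(["foo", "BAR", "Baz"]) gives ["BAR", "Baz", "foo"]: constants, then classes, then functions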
def _snake_case ( lowercase__ ):
# This inner function sort imports between [ ].
def _replace(lowercase__ ):
_lowerCamelCase : Dict = match.groups()[0]
if "," not in imports:
return f'''[{imports}]'''
_lowerCamelCase : Optional[int] = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCamelCase : Optional[Any] = keys[:-1]
return "[" + ", ".join([f'''\"{k}\"''' for k in sort_objects(lowercase_ )] ) + "]"
_lowerCamelCase : int = import_statement.split('\n' )
if len(lowercase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCamelCase : List[Any] = 2 if lines[1].strip() == '[' else 1
_lowerCamelCase : str = [(i, _re_strip_line.search(lowercase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCamelCase : Optional[Any] = sort_objects(lowercase_ , key=lambda lowercase__ : x[1] )
_lowerCamelCase : Any = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowercase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCamelCase : str = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCamelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCamelCase : int = keys[:-1]
_lowerCamelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'''\"{k}\"''' for k in sort_objects(lowercase_ )] )
return "\n".join(lowercase_ )
else:
# Finally we have to deal with imports fitting on one line
_lowerCamelCase : str = _re_bracket_content.sub(_replace , lowercase_ )
return import_statement
def _snake_case ( lowercase__ , lowercase__=True ):
with open(lowercase_ , encoding='utf-8' ) as f:
_lowerCamelCase : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCamelCase : Union[str, Any] = split_code_in_indented_blocks(
lowercase_ , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowercase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCamelCase : int = main_blocks[block_idx]
_lowerCamelCase : Union[str, Any] = block.split('\n' )
# Get to the start of the imports.
_lowerCamelCase : Union[str, Any] = 0
while line_idx < len(lowercase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCamelCase : List[Any] = len(lowercase_ )
else:
line_idx += 1
if line_idx >= len(lowercase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCamelCase : str = '\n'.join(block_lines[line_idx:-1] )
_lowerCamelCase : Optional[int] = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
_lowerCamelCase : Optional[int] = split_code_in_indented_blocks(lowercase_ , indent_level=lowercase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCamelCase : Union[str, Any] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCamelCase : Tuple = [(pattern.search(lowercase_ ).groups()[0] if pattern.search(lowercase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCamelCase : List[str] = [(i, key) for i, key in enumerate(lowercase_ ) if key is not None]
_lowerCamelCase : Dict = [x[0] for x in sorted(lowercase_ , key=lambda lowercase__ : x[1] )]
# We reorder the blocks, leaving empty lines/comments as they were and reordering the rest.
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : List[Any] = []
for i in range(len(lowercase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_lowerCamelCase : int = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowercase_ )
count += 1
# And we put our main block back together with its first and last line.
_lowerCamelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowercase_ ):
if check_only:
return True
else:
print(f'''Overwriting {file}.''' )
with open(lowercase_ , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(lowercase_ ) )
def _snake_case ( lowercase__=True ):
_lowerCamelCase : str = []
for root, _, files in os.walk(lowercase_ ):
if "__init__.py" in files:
_lowerCamelCase : List[Any] = sort_imports(os.path.join(lowercase_ , '__init__.py' ) , check_only=lowercase_ )
if result:
_lowerCamelCase : Optional[int] = [os.path.join(lowercase_ , '__init__.py' )]
if len(lowercase_ ) > 0:
raise ValueError(f'''Would overwrite {len(lowercase_ )} files, run `make style`.''' )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowercase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 356 |
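# A self-contained miniature of the single-line case handled by the script
# above: sort the quoted names inside brackets in place. The regex and names
# are illustrative, not the module-level patterns this file defines.
import re

statement = 'value = ["zebra", "alpha", "beta"]'

def _demo_replace(match):
    keys = [part.strip().strip('"') for part in match.groups()[0].split(",")]
    return "[" + ", ".join(f'"{k}"' for k in sorted(keys)) + "]"

print(re.sub(r"\[([^\]]+)\]", _demo_replace, statement))
# value = ["alpha", "beta", "zebra"]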
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = (UnCLIPScheduler,)
def A_ ( self , **lowercase ):
_lowerCamelCase : Any = {
'num_train_timesteps': 1000,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**lowercase )
return config
def A_ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase )
def A_ ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowercase )
def A_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase )
def A_ ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowercase )
def A_ ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowercase )
def A_ ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowercase , prev_timestep=lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' )
_lowerCamelCase : str = scheduler_class(**lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5
def A_ ( self ):
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' )
_lowerCamelCase : int = scheduler_class(**lowercase )
_lowerCamelCase : List[str] = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5
def A_ ( self ):
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config()
_lowerCamelCase : Tuple = scheduler_class(**lowercase )
_lowerCamelCase : Union[str, Any] = scheduler.timesteps
_lowerCamelCase : Any = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
_lowerCamelCase : Optional[int] = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : Tuple = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : Optional[int] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def A_ ( self ):
_lowerCamelCase : Tuple = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Optional[Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(25 )
_lowerCamelCase : Optional[Any] = scheduler.timesteps
_lowerCamelCase : Optional[int] = self.dummy_model()
_lowerCamelCase : Any = self.dummy_sample_deter
_lowerCamelCase : str = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : List[Any] = model(lowercase , lowercase )
if i + 1 == timesteps.shape[0]:
_lowerCamelCase : Optional[int] = None
else:
_lowerCamelCase : List[str] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Union[str, Any] = scheduler.step(
lowercase , lowercase , lowercase , prev_timestep=lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : List[Any] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def A_ ( self ):
pass
def A_ ( self ):
pass | 12 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCAmelCase__ ( a_ ):
'''simple docstring'''
lowerCamelCase__ = 42
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowercase=3 , lowercase=3 , lowercase=("DownEncoderBlock2D",) , lowercase=(64,) , lowercase=2 , lowercase=32 , lowercase="silu" , lowercase=True , ):
super().__init__()
_lowerCamelCase : Dict = layers_per_block
_lowerCamelCase : Optional[Any] = torch.nn.Convad(
lowercase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_lowerCamelCase : Any = None
_lowerCamelCase : Union[str, Any] = nn.ModuleList([] )
# down
_lowerCamelCase : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(lowercase_ ):
_lowerCamelCase : Optional[Any] = output_channel
_lowerCamelCase : Tuple = block_out_channels[i]
_lowerCamelCase : Tuple = i == len(lowercase_ ) - 1
_lowerCamelCase : Any = get_down_block(
lowercase_ , num_layers=self.layers_per_block , in_channels=lowercase_ , out_channels=lowercase_ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowercase_ , resnet_groups=lowercase_ , attention_head_dim=lowercase_ , temb_channels=lowercase_ , )
self.down_blocks.append(lowercase_ )
# mid
_lowerCamelCase : Any = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowercase_ , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=lowercase_ , temb_channels=lowercase_ , )
# out
_lowerCamelCase : Dict = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowercase_ , eps=1E-6 )
_lowerCamelCase : Tuple = nn.SiLU()
_lowerCamelCase : str = 2 * out_channels if double_z else out_channels
_lowerCamelCase : int = nn.Convad(block_out_channels[-1] , lowercase_ , 3 , padding=1 )
_lowerCamelCase : Dict = False
def A_ ( self , lowercase ):
_lowerCamelCase : str = x
_lowerCamelCase : Optional[Any] = self.conv_in(lowercase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowercase ):
def custom_forward(*lowercase ):
return module(*lowercase_ )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
_lowerCamelCase : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowercase_ ) , lowercase_ , use_reentrant=lowercase_ )
# middle
_lowerCamelCase : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowercase_ , use_reentrant=lowercase_ )
else:
for down_block in self.down_blocks:
_lowerCamelCase : Optional[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(lowercase_ ) , lowercase_ )
# middle
_lowerCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowercase_ )
else:
# down
for down_block in self.down_blocks:
_lowerCamelCase : Any = down_block(lowercase_ )
# middle
_lowerCamelCase : List[Any] = self.mid_block(lowercase_ )
# post-process
_lowerCamelCase : Optional[Any] = self.conv_norm_out(lowercase_ )
_lowerCamelCase : int = self.conv_act(lowercase_ )
_lowerCamelCase : Optional[Any] = self.conv_out(lowercase_ )
return sample
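# A minimal, standalone sketch of the gradient-checkpointing pattern used in
# the forward pass above: each block is wrapped so its activations are
# recomputed during backward instead of being stored. The tiny linear block
# is a stand-in, not one of the UNet blocks defined here.
import torch

block = torch.nn.Linear(4, 4)
x = torch.randn(2, 4, requires_grad=True)

def create_custom_forward(module):
    def custom_forward(*inputs):
        return module(*inputs)

    return custom_forward

out = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False)
out.sum().backward()
print(x.grad.shape)  # torch.Size([2, 4])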
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowercase=3 , lowercase=3 , lowercase=("UpDecoderBlock2D",) , lowercase=(64,) , lowercase=2 , lowercase=32 , lowercase="silu" , lowercase="group" , ):
super().__init__()
_lowerCamelCase : Dict = layers_per_block
_lowerCamelCase : List[Any] = nn.Convad(
lowercase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : int = nn.ModuleList([] )
_lowerCamelCase : Tuple = in_channels if norm_type == 'spatial' else None
# mid
_lowerCamelCase : List[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowercase_ , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowercase_ , temb_channels=lowercase_ , )
# up
_lowerCamelCase : int = list(reversed(lowercase_ ) )
_lowerCamelCase : Tuple = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowercase_ ):
_lowerCamelCase : Tuple = output_channel
_lowerCamelCase : Optional[Any] = reversed_block_out_channels[i]
_lowerCamelCase : int = i == len(lowercase_ ) - 1
_lowerCamelCase : List[str] = get_up_block(
lowercase_ , num_layers=self.layers_per_block + 1 , in_channels=lowercase_ , out_channels=lowercase_ , prev_output_channel=lowercase_ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=lowercase_ , resnet_groups=lowercase_ , attention_head_dim=lowercase_ , temb_channels=lowercase_ , resnet_time_scale_shift=lowercase_ , )
self.up_blocks.append(lowercase_ )
_lowerCamelCase : Dict = output_channel
# out
if norm_type == "spatial":
_lowerCamelCase : Optional[Any] = SpatialNorm(block_out_channels[0] , lowercase_ )
else:
_lowerCamelCase : Union[str, Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowercase_ , eps=1E-6 )
_lowerCamelCase : Optional[int] = nn.SiLU()
_lowerCamelCase : List[Any] = nn.Convad(block_out_channels[0] , lowercase_ , 3 , padding=1 )
_lowerCamelCase : Optional[Any] = False
def A_ ( self , lowercase , lowercase=None ):
_lowerCamelCase : List[Any] = z
_lowerCamelCase : List[Any] = self.conv_in(lowercase_ )
_lowerCamelCase : Union[str, Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowercase ):
def custom_forward(*lowercase ):
return module(*lowercase_ )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
_lowerCamelCase : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowercase_ , lowercase_ , use_reentrant=lowercase_ )
_lowerCamelCase : Optional[Any] = sample.to(lowercase_ )
# up
for up_block in self.up_blocks:
_lowerCamelCase : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowercase_ ) , lowercase_ , lowercase_ , use_reentrant=lowercase_ )
else:
# middle
_lowerCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowercase_ , lowercase_ )
_lowerCamelCase : Any = sample.to(lowercase_ )
# up
for up_block in self.up_blocks:
_lowerCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowercase_ ) , lowercase_ , lowercase_ )
else:
# middle
_lowerCamelCase : Optional[Any] = self.mid_block(lowercase_ , lowercase_ )
_lowerCamelCase : Union[str, Any] = sample.to(lowercase_ )
# up
for up_block in self.up_blocks:
_lowerCamelCase : Optional[Any] = up_block(lowercase_ , lowercase_ )
# post-process
if latent_embeds is None:
_lowerCamelCase : Any = self.conv_norm_out(lowercase_ )
else:
_lowerCamelCase : Dict = self.conv_norm_out(lowercase_ , lowercase_ )
_lowerCamelCase : int = self.conv_act(lowercase_ )
_lowerCamelCase : Optional[Any] = self.conv_out(lowercase_ )
return sample
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase=None , lowercase="random" , lowercase=False , lowercase=True ):
super().__init__()
_lowerCamelCase : List[Any] = n_e
_lowerCamelCase : List[Any] = vq_embed_dim
_lowerCamelCase : int = beta
_lowerCamelCase : Optional[int] = legacy
_lowerCamelCase : Union[str, Any] = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_lowerCamelCase : int = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
_lowerCamelCase : str = self.used.shape[0]
_lowerCamelCase : Optional[Any] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_lowerCamelCase : str = self.re_embed
_lowerCamelCase : Optional[int] = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
_lowerCamelCase : Any = n_e
_lowerCamelCase : List[Any] = sane_index_shape
def A_ ( self , lowercase ):
_lowerCamelCase : Tuple = inds.shape
assert len(lowercase_ ) > 1
_lowerCamelCase : int = inds.reshape(ishape[0] , -1 )
_lowerCamelCase : Dict = self.used.to(lowercase_ )
_lowerCamelCase : Tuple = (inds[:, :, None] == used[None, None, ...]).long()
_lowerCamelCase : Any = match.argmax(-1 )
_lowerCamelCase : str = match.sum(2 ) < 1
if self.unknown_index == "random":
_lowerCamelCase : Dict = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_lowerCamelCase : List[str] = self.unknown_index
return new.reshape(lowercase_ )
def A_ ( self , lowercase ):
_lowerCamelCase : Any = inds.shape
assert len(lowercase_ ) > 1
_lowerCamelCase : Tuple = inds.reshape(ishape[0] , -1 )
_lowerCamelCase : Dict = self.used.to(lowercase_ )
if self.re_embed > self.used.shape[0]: # extra token
_lowerCamelCase : Tuple = 0 # simply set to zero
_lowerCamelCase : int = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowercase_ )
return back.reshape(lowercase_ )
def A_ ( self , lowercase ):
# reshape z -> (batch, height, width, channel) and flatten
_lowerCamelCase : List[Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
_lowerCamelCase : List[str] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_lowerCamelCase : List[Any] = torch.argmin(torch.cdist(lowercase_ , self.embedding.weight ) , dim=1 )
_lowerCamelCase : List[Any] = self.embedding(lowercase_ ).view(z.shape )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Tuple = None
# compute loss for embedding
if not self.legacy:
_lowerCamelCase : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_lowerCamelCase : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_lowerCamelCase : Dict = z + (z_q - z).detach()
# reshape back to match original input shape
_lowerCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_lowerCamelCase : List[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_lowerCamelCase : List[Any] = self.remap_to_used(lowercase_ )
_lowerCamelCase : int = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_lowerCamelCase : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def A_ ( self , lowercase , lowercase ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_lowerCamelCase : List[str] = indices.reshape(shape[0] , -1 ) # add batch axis
_lowerCamelCase : Optional[Any] = self.unmap_to_all(lowercase_ )
_lowerCamelCase : List[str] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_lowerCamelCase : Optional[int] = self.embedding(lowercase_ )
if shape is not None:
_lowerCamelCase : List[Any] = z_q.view(lowercase_ )
# reshape back to match original input shape
_lowerCamelCase : Tuple = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
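# A self-contained sketch of the nearest-neighbour lookup the quantizer above
# performs with torch.cdist + argmin, plus the straight-through estimator that
# lets gradients bypass the non-differentiable codebook lookup. Shapes are illustrative.
import torch

codebook = torch.randn(8, 4)                  # 8 codes of dimension 4
z = torch.randn(2, 4, requires_grad=True)     # 2 flattened latents
indices = torch.argmin(torch.cdist(z, codebook), dim=1)
z_q = codebook[indices]
z_q = z + (z_q - z).detach()                  # straight-through: grad flows to z
z_q.sum().backward()
print(indices.shape, z.grad.shape)            # torch.Size([2]) torch.Size([2, 4])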
class lowerCAmelCase__ ( a_ ):
'''simple docstring'''
def __init__( self , lowercase , lowercase=False ):
_lowerCamelCase : str = parameters
_lowerCamelCase : Tuple = torch.chunk(lowercase_ , 2 , dim=1 )
_lowerCamelCase : Optional[int] = torch.clamp(self.logvar , -30.0 , 20.0 )
_lowerCamelCase : str = deterministic
_lowerCamelCase : Dict = torch.exp(0.5 * self.logvar )
_lowerCamelCase : List[str] = torch.exp(self.logvar )
if self.deterministic:
_lowerCamelCase : str = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def A_ ( self , lowercase = None ):
# make sure sample is on the same device as the parameters and has same dtype
_lowerCamelCase : Tuple = randn_tensor(
self.mean.shape , generator=lowercase_ , device=self.parameters.device , dtype=self.parameters.dtype )
_lowerCamelCase : Union[str, Any] = self.mean + self.std * sample
return x
def A_ ( self , lowercase=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def A_ ( self , lowercase , lowercase=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
_lowerCamelCase : Dict = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowercase_ )
def A_ ( self ):
return self.mean | 357 |
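# A minimal numeric check of the distribution above: reparameterised sampling
# and the closed-form KL to a standard normal, 0.5 * sum(mu^2 + var - 1 - logvar).
import torch

mean, logvar = torch.zeros(1, 4), torch.full((1, 4), -1.0)
std = torch.exp(0.5 * logvar)
sample = mean + std * torch.randn_like(mean)  # reparameterisation trick
kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=1)
print(sample.shape, kl)                       # torch.Size([1, 4]) tensor([0.7358])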
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """data2vec-audio"""
def __init__( self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=16 , lowercase=19 , lowercase=5 , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=256 , lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
_lowerCamelCase : str = hidden_size
_lowerCamelCase : str = feat_extract_activation
_lowerCamelCase : Optional[Any] = list(lowercase )
_lowerCamelCase : Dict = list(lowercase )
_lowerCamelCase : Dict = list(lowercase )
_lowerCamelCase : Optional[Any] = conv_bias
_lowerCamelCase : Union[str, Any] = num_conv_pos_embeddings
_lowerCamelCase : List[Any] = num_conv_pos_embedding_groups
_lowerCamelCase : List[Any] = conv_pos_kernel_size
_lowerCamelCase : Optional[int] = len(self.conv_dim )
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Any = hidden_dropout
_lowerCamelCase : Union[str, Any] = attention_dropout
_lowerCamelCase : str = activation_dropout
_lowerCamelCase : Any = feat_proj_dropout
_lowerCamelCase : Tuple = final_dropout
_lowerCamelCase : Union[str, Any] = layerdrop
_lowerCamelCase : List[Any] = layer_norm_eps
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : Tuple = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Optional[Any] = mask_time_prob
_lowerCamelCase : List[Any] = mask_time_length
_lowerCamelCase : List[Any] = mask_time_min_masks
_lowerCamelCase : Tuple = mask_feature_prob
_lowerCamelCase : Optional[Any] = mask_feature_length
_lowerCamelCase : Dict = mask_feature_min_masks
# ctc loss
_lowerCamelCase : Tuple = ctc_loss_reduction
_lowerCamelCase : str = ctc_zero_infinity
# adapter
_lowerCamelCase : Union[str, Any] = add_adapter
_lowerCamelCase : List[Any] = adapter_kernel_size
_lowerCamelCase : Optional[Any] = adapter_stride
_lowerCamelCase : List[Any] = num_adapter_layers
_lowerCamelCase : int = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCamelCase : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCamelCase : List[str] = list(lowercase )
_lowerCamelCase : Optional[Any] = list(lowercase )
_lowerCamelCase : Any = list(lowercase )
_lowerCamelCase : Optional[Any] = xvector_output_dim
@property
def A_ ( self ):
return math.prod(self.conv_stride ) | 12 | 0 |
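# A quick standalone check of the property above: the product of the
# convolutional strides gives the total feature-extractor downsampling.
import math

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default strides from the config
ratio = math.prod(conv_stride)
print(ratio, 16_000 / ratio)         # 320 -> 50 feature frames per second at 16 kHz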
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowercase__ = logging.get_logger(__name__)
@add_end_docstrings(lowercase )
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def __init__( self , *lowercase , **lowercase ):
super().__init__(*__A , **__A )
requires_backends(self , 'decord' )
self.check_model_type(__A )
def A_ ( self , lowercase=None , lowercase=None , lowercase=None ):
_lowerCamelCase : Tuple = {}
if frame_sampling_rate is not None:
_lowerCamelCase : Dict = frame_sampling_rate
if num_frames is not None:
_lowerCamelCase : Optional[int] = num_frames
_lowerCamelCase : int = {}
if top_k is not None:
_lowerCamelCase : str = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowercase , **lowercase ):
return super().__call__(__A , **__A )
def A_ ( self , lowercase , lowercase=None , lowercase=1 ):
if num_frames is None:
_lowerCamelCase : str = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
_lowerCamelCase : Dict = BytesIO(requests.get(__A ).content )
_lowerCamelCase : Optional[int] = VideoReader(__A )
videoreader.seek(0 )
_lowerCamelCase : str = 0
_lowerCamelCase : List[str] = num_frames * frame_sampling_rate - 1
_lowerCamelCase : List[str] = np.linspace(__A , __A , num=__A , dtype=np.intaa )
_lowerCamelCase : int = videoreader.get_batch(__A ).asnumpy()
_lowerCamelCase : int = list(__A )
_lowerCamelCase : Any = self.image_processor(__A , return_tensors=self.framework )
return model_inputs
def A_ ( self , lowercase ):
_lowerCamelCase : Any = self.model(**__A )
return model_outputs
def A_ ( self , lowercase , lowercase=5 ):
if top_k > self.model.config.num_labels:
_lowerCamelCase : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
_lowerCamelCase : str = model_outputs.logits.softmax(-1 )[0]
_lowerCamelCase, _lowerCamelCase : Any = probs.topk(__A )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_lowerCamelCase : int = scores.tolist()
_lowerCamelCase : Union[str, Any] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__A , __A )]
| 358 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
lowercase__ = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """facebook/nllb-200-distilled-600M"""
lowerCamelCase__ = (
"""This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
"""be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
"""which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """
"""plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
)
lowerCamelCase__ = """translator"""
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSeqaSeqLM
lowerCamelCase__ = LANGUAGE_CODES
lowerCamelCase__ = ["""text""", """text""", """text"""]
lowerCamelCase__ = ["""text"""]
def A_ ( self , lowercase , lowercase , lowercase ):
if src_lang not in self.lang_to_code:
raise ValueError(F'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'''{tgt_lang} is not a supported language.''' )
_lowerCamelCase : str = self.lang_to_code[src_lang]
_lowerCamelCase : int = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowercase , return_tensors='pt' , src_lang=lowercase , tgt_lang=lowercase )
def A_ ( self , lowercase ):
return self.model.generate(**lowercase )
def A_ ( self , lowercase ):
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowercase ) | 12 | 0 |
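# A minimal sketch of the name-to-code validation the tool above performs
# before building translation inputs, using a two-entry subset of the mapping.
_demo_codes = {"Romanian": "ron_Latn", "Tosk Albanian": "als_Latn"}

def _demo_resolve(src_lang, tgt_lang):
    for lang in (src_lang, tgt_lang):
        if lang not in _demo_codes:
            raise ValueError(f"{lang} is not a supported language.")
    return _demo_codes[src_lang], _demo_codes[tgt_lang]

print(_demo_resolve("Romanian", "Tosk Albanian"))  # ('ron_Latn', 'als_Latn')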
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
super().tearDown()
gc.collect()
def A_ ( self ):
_lowerCamelCase : int = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
_lowerCamelCase : Any = """A painting of a squirrel eating a burger"""
_lowerCamelCase : Union[str, Any] = jax.device_count()
_lowerCamelCase : List[str] = num_samples * [prompt]
_lowerCamelCase : Optional[Any] = sd_pipe.prepare_inputs(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : List[str] = replicate(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Any = shard(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : int = jax.random.PRNGKey(0 )
_lowerCamelCase : Tuple = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
_lowerCamelCase : Dict = sd_pipe(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_inference_steps=25 , jit=_SCREAMING_SNAKE_CASE )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_lowerCamelCase : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_lowerCamelCase : int = images[0, 253:256, 253:256, -1]
_lowerCamelCase : Any = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_lowerCamelCase : str = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A_ ( self ):
_lowerCamelCase : Optional[Any] = """stabilityai/stable-diffusion-2"""
_lowerCamelCase : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder='scheduler' )
_lowerCamelCase : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , revision='bf16' , dtype=jnp.bfloataa , )
_lowerCamelCase : Optional[Any] = scheduler_params
_lowerCamelCase : int = """A painting of a squirrel eating a burger"""
_lowerCamelCase : Optional[Any] = jax.device_count()
_lowerCamelCase : Optional[Any] = num_samples * [prompt]
_lowerCamelCase : Any = sd_pipe.prepare_inputs(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[int] = replicate(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Dict = shard(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : int = jax.random.PRNGKey(0 )
_lowerCamelCase : Tuple = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
_lowerCamelCase : List[str] = sd_pipe(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_inference_steps=25 , jit=_SCREAMING_SNAKE_CASE )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_lowerCamelCase : Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_lowerCamelCase : Optional[Any] = images[0, 253:256, 253:256, -1]
_lowerCamelCase : Any = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_lowerCamelCase : List[Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 | 359 |
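# A small numpy sketch of what flax's `shard` helper used above does to a
# batch before pmapped inference: fold the leading axis into
# (num_devices, per_device_batch, ...). Sizes here are illustrative.
import numpy as np

num_devices, per_device, seq_len = 2, 4, 3
prompt_ids = np.arange(num_devices * per_device * seq_len).reshape(num_devices * per_device, seq_len)
sharded = prompt_ids.reshape((num_devices, per_device) + prompt_ids.shape[1:])
print(sharded.shape)  # (2, 4, 3)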
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def A_ ( self , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
_lowerCamelCase : Tuple = VideoClassificationPipeline(model=lowercase , image_processor=lowercase , top_k=2 )
_lowerCamelCase : List[str] = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def A_ ( self , lowercase , lowercase ):
for example in examples:
_lowerCamelCase : Tuple = video_classifier(lowercase )
self.assertEqual(
lowercase , [
{'score': ANY(lowercase ), 'label': ANY(lowercase )},
{'score': ANY(lowercase ), 'label': ANY(lowercase )},
] , )
@require_torch
def A_ ( self ):
_lowerCamelCase : Optional[Any] = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
_lowerCamelCase : Tuple = VideoMAEFeatureExtractor(
size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} )
_lowerCamelCase : Dict = pipeline(
'video-classification' , model=lowercase , feature_extractor=lowercase , frame_sampling_rate=4 )
_lowerCamelCase : Any = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
_lowerCamelCase : Dict = video_classifier(lowercase , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}] , )
_lowerCamelCase : str = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
] , )
@require_tf
def A_ ( self ):
pass | 12 | 0 |
"""simple docstring"""
from random import randint, random
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False , lowercase__ = False , lowercase__ = 5 , ):
_lowerCamelCase : Optional[Any] = [[-1] * number_of_cells] # Create a highway without any car
_lowerCamelCase : Any = 0
_lowerCamelCase : List[str] = max(_a , 0 )
while i < number_of_cells:
_lowerCamelCase : Optional[Any] = (
randint(0 , _a ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : Any = 0
_lowerCamelCase : int = highway_now[car_index + 1 :]
for cell in range(len(_a ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(_a , -1 )
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : str = len(_a )
# Before calculations, the highway is empty
_lowerCamelCase : Any = [-1] * number_of_cells
for car_index in range(_a ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_lowerCamelCase : Any = min(highway_now[car_index] + 1 , _a )
# Number of empty cells before the next car
_lowerCamelCase : Optional[int] = get_distance(_a , _a ) - 1
# We can't have the car causing an accident
_lowerCamelCase : Union[str, Any] = min(next_highway[car_index] , _a )
if random() < probability:
# Randomly, a driver will slow down
_lowerCamelCase : Tuple = max(next_highway[car_index] - 1 , 0 )
return next_highway
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : Union[str, Any] = len(highway[0] )
for i in range(_a ):
_lowerCamelCase : str = update(highway[i] , _a , _a )
_lowerCamelCase : Any = [-1] * number_of_cells
for car_index in range(_a ):
_lowerCamelCase : Any = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_lowerCamelCase : Any = (car_index + speed) % number_of_cells
# Commit the change of position
_lowerCamelCase : Optional[int] = speed
highway.append(_a )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod() | 360 |
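# A self-contained single-step walk-through of the update rules above:
# accelerate, then cap the speed by the free cells ahead. The module above
# additionally keeps one extra cell of headroom and applies a random
# slowdown; both are omitted here for clarity.
highway_now, max_speed = [0, -1, -1, 3, -1, -1, -1, -1], 5
n = len(highway_now)
next_highway = [-1] * n
for i, speed in enumerate(highway_now):
    if speed == -1:
        continue
    speed = min(speed + 1, max_speed)  # acceleration
    d = next(k for k in range(1, n) if highway_now[(i + k) % n] != -1)
    speed = min(speed, d - 1)          # don't drive into the car ahead
    next_highway[(i + speed) % n] = speed
print(next_highway)  # [-1, 1, -1, -1, -1, -1, -1, 4]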
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase__ = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 12 | 0 |
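# A tiny self-contained sketch of the lazy-import pattern above: attribute
# access on the module object triggers the real import. The class and mapping
# below are illustrative, not transformers' _LazyModule.
import importlib
import types

class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item):
        for submodule, names in self._import_structure.items():
            if item in names:
                return getattr(importlib.import_module(submodule), item)
        raise AttributeError(item)

demo = _DemoLazyModule("demo", {"math": ["sqrt"]})
print(demo.sqrt(9.0))  # 3.0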
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
def _snake_case ( lowercase__ ):
_lowerCamelCase : int = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
_lowerCamelCase : Optional[int] = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
_lowerCamelCase : Optional[Any] = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_lowerCamelCase : Dict = key[key.find('patch_embed' ) + len('patch_embed' )]
_lowerCamelCase : Tuple = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(__lowerCAmelCase )-1}''' )
if "norm" in key:
_lowerCamelCase : Dict = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_lowerCamelCase : Dict = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
_lowerCamelCase : List[str] = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(__lowerCAmelCase )-1}''' )
if "layer_norm1" in key:
_lowerCamelCase : Tuple = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
_lowerCamelCase : Optional[int] = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
_lowerCamelCase : Tuple = key[key.find('block' ) + len('block' )]
_lowerCamelCase : Optional[int] = key.replace(f'''block{idx}''' , f'''block.{int(__lowerCAmelCase )-1}''' )
if "attn.q" in key:
_lowerCamelCase : Dict = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
_lowerCamelCase : str = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
_lowerCamelCase : Union[str, Any] = key.replace('attn' , 'attention.self' )
if "fc1" in key:
_lowerCamelCase : List[Any] = key.replace('fc1' , 'dense1' )
if "fc2" in key:
_lowerCamelCase : List[str] = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
_lowerCamelCase : Any = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
_lowerCamelCase : int = key.replace('linear_fuse.conv' , 'linear_fuse' )
_lowerCamelCase : Optional[int] = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_lowerCamelCase : Tuple = key[key.find('linear_c' ) + len('linear_c' )]
_lowerCamelCase : Any = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(__lowerCAmelCase )-1}''' )
if "bot_conv" in key:
_lowerCamelCase : List[str] = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
_lowerCamelCase : Dict = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
_lowerCamelCase : int = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
_lowerCamelCase : Optional[int] = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
_lowerCamelCase : List[Any] = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
_lowerCamelCase : Union[str, Any] = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
_lowerCamelCase : Dict = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
_lowerCamelCase : Optional[Any] = key.replace('module.last_layer_depth' , 'head.head' )
_lowerCamelCase : int = value
return new_state_dict
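# A standalone miniature of the index-shifting rename above: 1-based suffixes
# like "patch_embed3" become 0-based "patch_embeddings.2".
key = "patch_embed3.proj.weight"
idx = key[key.find("patch_embed") + len("patch_embed")]
print(key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}"))
# patch_embeddings.2.proj.weight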
def _snake_case ( lowercase__ , lowercase__ ):
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_lowerCamelCase : Optional[Any] = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
_lowerCamelCase : int = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
_lowerCamelCase : Union[str, Any] = kv_weight[
: config.hidden_sizes[i], :
]
_lowerCamelCase : Optional[int] = kv_bias[: config.hidden_sizes[i]]
_lowerCamelCase : str = kv_weight[
config.hidden_sizes[i] :, :
]
_lowerCamelCase : Tuple = kv_bias[config.hidden_sizes[i] :]
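# A standalone sketch of the fused key/value split performed just above: the
# original checkpoint stores keys and values in one matrix, which is cut in
# half along the output dimension. hidden_size is illustrative.
import torch

hidden_size = 4
kv_weight = torch.randn(2 * hidden_size, hidden_size)  # fused [key; value]
key_weight = kv_weight[:hidden_size, :]
value_weight = kv_weight[hidden_size:, :]
print(key_weight.shape, value_weight.shape)  # torch.Size([4, 4]) torch.Size([4, 4])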
def _snake_case ( ):
_lowerCamelCase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase : int = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return image
@torch.no_grad()
def _snake_case ( lowercase__ , lowercase__ , lowercase__=False , lowercase__=None ):
_lowerCamelCase : Dict = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
_lowerCamelCase : Dict = GLPNImageProcessor()
# prepare image
_lowerCamelCase : Union[str, Any] = prepare_img()
_lowerCamelCase : List[Any] = image_processor(images=__lowerCAmelCase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
_lowerCamelCase : str = torch.load(__lowerCAmelCase , map_location=torch.device('cpu' ) )
# rename keys
_lowerCamelCase : int = rename_keys(__lowerCAmelCase )
# key and value matrices need special treatment
read_in_k_v(__lowerCAmelCase , __lowerCAmelCase )
# create HuggingFace model and load state dict
_lowerCamelCase : Tuple = GLPNForDepthEstimation(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# forward pass
_lowerCamelCase : int = model(__lowerCAmelCase )
_lowerCamelCase : List[str] = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
_lowerCamelCase : str = torch.tensor(
[[4.4_1_4_7, 4.0_8_7_3, 4.0_6_7_3], [3.7_8_9_0, 3.2_8_8_1, 3.1_5_2_5], [3.7_6_7_4, 3.5_4_2_3, 3.4_9_1_3]] )
elif "kitti" in model_name:
_lowerCamelCase : Optional[Any] = torch.tensor(
[[3.4_2_9_1, 2.7_8_6_5, 2.5_1_5_1], [3.2_8_4_1, 2.7_0_2_1, 2.3_5_0_2], [3.1_1_4_7, 2.4_6_2_5, 2.2_4_8_1]] )
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
_lowerCamelCase : Tuple = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , __lowerCAmelCase , atol=1E-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(__lowerCAmelCase , __lowerCAmelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__lowerCAmelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(__lowerCAmelCase , __lowerCAmelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__lowerCAmelCase , )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you\'re pushing to the hub.""",
)
lowercase__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 361 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def _snake_case ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ):
if attention_mask is None:
_lowerCamelCase : List[str] = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class lowerCAmelCase__ :
'''simple docstring'''
lowerCamelCase__ = OPTConfig
lowerCamelCase__ = {}
lowerCamelCase__ = """gelu"""
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=16 , lowercase=2 , lowercase=4 , lowercase=4 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , lowercase=16 , lowercase=16 , ):
_lowerCamelCase : Tuple = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : str = is_training
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : List[Any] = eos_token_id
_lowerCamelCase : Tuple = pad_token_id
_lowerCamelCase : List[str] = bos_token_id
_lowerCamelCase : Optional[int] = embed_dim
_lowerCamelCase : List[str] = word_embed_proj_dim
_lowerCamelCase : Any = False
def A_ ( self ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowerCamelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCamelCase : str = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowerCamelCase : Tuple = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase , **self.config_updates , )
_lowerCamelCase : int = prepare_opt_inputs_dict(lowercase , lowercase )
return config, inputs_dict
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : Optional[Any] = TFOPTModel(config=lowercase )
_lowerCamelCase : Optional[Any] = inputs_dict['input_ids']
_lowerCamelCase : str = input_ids[:1, :]
_lowerCamelCase : Dict = inputs_dict['attention_mask'][:1, :]
_lowerCamelCase : Optional[Any] = 1
# first forward pass
_lowerCamelCase : Any = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
_lowerCamelCase, _lowerCamelCase : List[str] = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
_lowerCamelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
_lowerCamelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
_lowerCamelCase : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase )[0]
_lowerCamelCase : List[str] = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowerCamelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowerCamelCase : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
_lowerCamelCase : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
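    # The check above exercises incremental decoding: a full-sequence forward pass and a
    # cached pass (feeding only the new tokens plus `past_key_values`) must agree on a
    # randomly chosen logit slice — the same key/value reuse that `generate()` relies on.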
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, 'weight'):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, 'weight'):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    '''simple docstring'''

    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained('facebook/opt-350m')
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    '''simple docstring'''

    def setUp(self):
        super().setUp()
        self.path_model = 'facebook/opt-350m'

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            'Today is a beautiful day and I want to',
            'In the city of',
            'Paris is the capital of France and',
            'Computers and mobile phones have taken',
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors='tf', padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ] )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    '''simple docstring'''

    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = 'facebook/opt-125m'

        EXPECTED_OUTPUTS = [
            'Today is a beautiful day and I want to',
            'In the city of New York, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors='tf').input_ids

            generated_ids = model.generate(input_ids, max_length=10)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = 'facebook/opt-350m'

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = 'left'

        # use different length sentences to test batching
        sentences = [
            'Hello, my dog is a little',
            'Today, I',
        ]

        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'])

        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs['attention_mask'][-1], tf.int64) )
        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
            'Today, I was in the middle of a conversation with a friend about the',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = 'facebook/opt-350m'

        EXPECTED_OUTPUTS = [
            'Today is a beautiful day and I want to',
            'In the city of San Francisco, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors='tf').input_ids

            generated_ids = model.generate(input_ids, max_length=10)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError('Exactly one argument must be 0')
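# Worked example (hypothetical call): a series circuit with R = 3 ohm and X = 4 ohm
# has impedance sqrt(3**2 + 4**2) = 5 ohm:
#   electrical_impedance(3, 4, 0)  ->  {'impedance': 5.0}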
if __name__ == "__main__":
import doctest
doctest.testmod() | 362 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    '''simple docstring'''

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors='pt', truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
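# Minimal usage sketch (assumes the default checkpoint is reachable; calling the tool
# chains encode -> forward -> decode and lazily loads the model on first use):
#
#   tool = TextSummarizationTool()
#   summary = tool("Paste a long English text here ...")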
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
lowercase__ = parser.parse_args()
lowercase__ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 363 |
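# Illustrative invocation of this conversion script (file and directory names are placeholders):
#   python convert_checkpoint.py --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --dump_path ./converted-pipeline --extract_ema --half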
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # visit items in order of decreasing value-to-weight ratio (the greedy choice)
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
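# Worked example: values (60, 100, 120), weights (10, 20, 30), capacity 50.
# Greedy order is item 0 (ratio 6), item 1 (ratio 5), then 2/3 of item 2 (ratio 4):
#   fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#   -> (240.0, [1, 1, 0.6666666666666666])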
if __name__ == "__main__":
import doctest
doctest.testmod() | 12 | 0 |
"""simple docstring"""
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
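# The loop slides a fixed 13-digit window over the 1000-digit string, checking
# len(n) - 12 windows in O(13 * len(n)) time; with the default N the result is
# 23514624000 (Project Euler problem 8).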
if __name__ == "__main__":
print(F"{solution() = }")
| 364 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
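# `hf_table_format` drops tabulate's rule lines so the output renders as a compact
# pipe-delimited table inside a Slack code block, roughly:
#   Test Location | Num Failed
#   test_foo.py   |          2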
failed = []
group_info = []
no_error_payload = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
payload = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
"""emoji""": True,
},
}
]
total_num_failed = 0
for log in Path().glob("""*.log"""):
    section_num_failed = 0
with open(log, """r""") as f:
for line in f:
            line = json.loads(line)
if line.get("""nodeid""", """""") != "":
                test = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowercase__ = F"{line['duration']:.4f}"
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
log.unlink()
lowercase__ = """"""
lowercase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
for test in failed_tests:
                data = test[0].split("""::""")
                data[0] = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
# Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowercase__ = """Too many failed tests, please see the full report in the Action results."""
lowercase__ = len(err) + 10
lowercase__ = message[: 3000 - offset] + F"\n...\n```\n{err}"
print(F"### {message}")
else:
lowercase__ = """No failed tests! 🤗"""
print(F"## {message}")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
        md_report = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
        action_button = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
        date_report = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
        ts = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowercase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = """"""
                payload = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
) | 12 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
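# e.g. rename_key(state_dict, "backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight")
# pops the tensor stored under the old key and re-inserts it under the new key, in place.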
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('bn', 'batch_norm')
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='pt').pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
    print('Logits:', outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving processor to {pytorch_dump_folder_path}''')
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'''Pushing model and processor for {model_name} to hub''')
        model.push_to_hub(f'''openmmlab/{model_name}''')
        processor.push_to_hub(f'''openmmlab/{model_name}''')
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F"upernet-convnext-{size}" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowercase__ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 365 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowercase__ = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    '''simple docstring'''

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
@classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path, speaker_embeddings_dict_path, subfolder=kwargs.pop('subfolder', None), cache_dir=kwargs.pop('cache_dir', None), force_download=kwargs.pop('force_download', False), proxies=kwargs.pop('proxies', None), resume_download=kwargs.pop('resume_download', False), local_files_only=kwargs.pop('local_files_only', False), use_auth_token=kwargs.pop('use_auth_token', None), revision=kwargs.pop('revision', None), )
            if speaker_embeddings_path is None:
                logger.warning(
                    F'''`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
def A_ ( self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , 'v2' ) , exist_ok=lowercase )
_lowerCamelCase : int = {}
_lowerCamelCase : List[Any] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_lowerCamelCase : Optional[Any] = self._load_voice_preset(lowercase )
_lowerCamelCase : Any = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , lowercase , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=lowercase , )
_lowerCamelCase : List[str] = os.path.join(lowercase , F'''{prompt_key}_{key}.npy''' )
_lowerCamelCase : Optional[Any] = tmp_dict
with open(os.path.join(lowercase , lowercase ) , 'w' ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )

            path = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key], subfolder=kwargs.pop('subfolder', None), cache_dir=kwargs.pop('cache_dir', None), force_download=kwargs.pop('force_download', False), proxies=kwargs.pop('proxies', None), resume_download=kwargs.pop('resume_download', False), local_files_only=kwargs.pop('local_files_only', False), use_auth_token=kwargs.pop('use_auth_token', None), revision=kwargs.pop('revision', None), )
            if path is None:
                raise ValueError(
                    F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does not exist
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''')

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.''')

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.''')
    def __call__(self, text=None, voice_preset=None, return_tensors="pt", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs, ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith('.npz'):
                    voice_preset = voice_preset + '.npz'
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text, return_tensors=return_tensors, padding='max_length', max_length=max_length, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, add_special_tokens=add_special_tokens, **kwargs, )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
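# Minimal usage sketch (assumes a Bark checkpoint with bundled voice presets, e.g. "suno/bark-small"):
#
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # `inputs` holds padded input_ids/attention_mask plus the preset under "history_prompt"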
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
lowercase__ = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    '''simple docstring'''

    def __init__(self, feature_size, sampling_rate, padding_value, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop('padding_side', 'right')
        self.return_attention_mask = kwargs.pop('return_attention_mask', True)

        super().__init__(**kwargs)
    def pad(self, processed_features, padding=True, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, ):
        # If we have a list of dicts, convert it to a dict of lists so `pad` also
        # works intuitively on a list of already-processed inputs.
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
                F''' to this method that includes {self.model_input_names[0]}, but you provided'''
                F''' {list(processed_features.keys())}''' )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features['attention_mask'] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = 'tf'
            elif is_torch_tensor(first_element):
                return_tensors = 'pt'
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = 'np'
            else:
                raise ValueError(
                    F'''type of {first_element} unknown: {type(first_element)}. '''
                    'Should be one of a python, numpy, pytorch or tensorflow object.' )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError('Some items in the output dictionary have a different batch size than others.')

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, truncation=truncation, )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i], max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                # keep the memory footprint down: fp64 arrays are downcast to fp32
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(self, processed_features, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None, ):
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features['attention_mask'] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features['attention_mask'] = np.pad(
                        processed_features['attention_mask'], (0, difference) )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, 'constant', constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features['attention_mask'] = np.pad(
                        processed_features['attention_mask'], (difference, 0) )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, 'constant', constant_values=self.padding_value )
            else:
                raise ValueError('Invalid padding strategy:' + str(self.padding_side))

        return processed_features
    def _truncate(self, processed_features, max_length=None, pad_to_multiple_of=None, truncation=None, ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.')

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features['attention_mask'] = processed_features['attention_mask'][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    F'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
                ' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )

        return padding_strategy
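# Minimal usage sketch (ToyExtractor and its parameters are assumptions for illustration,
# not a real transformers class): a concrete subclass can right-pad a ragged batch in one call.
#
#   class ToyExtractor(SequenceFeatureExtractor):
#       model_input_names = ["input_values"]
#
#   extractor = ToyExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   batch = extractor.pad({"input_values": [[0.1, 0.2, 0.3], [0.4]]}, padding="longest", return_tensors="np")
#   # batch["input_values"].shape == (2, 3); batch["attention_mask"] flags the real samples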
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowercase__ = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    '''simple docstring'''

    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # each slot holds a deque so colliding keys chain instead of overwriting
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
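# Usage sketch (assumes the sibling HashTable(size_table, charge_factor=..., ...) API
# from .hash_table, including its insert_data() method):
#
#   table = HashTableWithLinkedList(3, charge_factor=2)
#   for value in (10, 13, 16):   # all map to the same bucket when size_table == 3
#       table.insert_data(value)
#   # the shared bucket now chains the colliding values in a deque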
| 367 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
lowercase__ = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod() | 12 | 0 |
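# Usage sketch: rank candidate decryptions by how English-like their letter
# frequencies are (the score ranges 0-12, counting matches among the six most and
# six least frequent letters):
#
#   score = english_freq_match_score("Attack at dawn and hold the eastern gate")
#   # higher scores mean the candidate plaintext behaves like English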
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowercase__ = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
        return model
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)
    @property
    def dummy_transformer(self):
        torch.manual_seed(0)

        height = 12
        width = 12

        model_kwargs = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }

        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = 'cpu'

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'teddy bear playing in the pool'

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='np')
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='np', return_dict=False, num_inference_steps=2 )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = 'cpu'

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'teddy bear playing in the pool'

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='np')
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='np', return_dict=False, num_inference_steps=2 )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self ):
_lowerCamelCase : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
_lowerCamelCase : Tuple = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
_lowerCamelCase : Dict = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
_lowerCamelCase : str = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_lowerCamelCase : List[Any] = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCamelCase__ , output_type='np' , )
_lowerCamelCase : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0 | 368 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ :
'''simple docstring'''
    def __init__( self , question_encoder , generator ):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def A_ ( self , save_directory ):
        if os.path.isfile(save_directory ):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , 'question_encoder_tokenizer' )
        generator_path = os.path.join(save_directory , 'generator_tokenizer' )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )
@classmethod
    def A_ ( cls , pretrained_model_name_or_path , **kwargs ):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer
        config = kwargs.pop('config' , None )
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path )
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder='generator_tokenizer' )
        return cls(question_encoder=question_encoder , generator=generator )
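    # Hedged usage sketch (this class corresponds to RagTokenizer upstream; the checkpoint
    # name is illustrative):
    #   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
    #   batch = tokenizer(["who wrote hamlet?"], return_tensors="pt")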
    def __call__( self , *args , **kwargs ):
        return self.current_tokenizer(*args , **kwargs )
    def A_ ( self , *args , **kwargs ):
        return self.generator.batch_decode(*args , **kwargs )
    def A_ ( self , *args , **kwargs ):
        return self.generator.decode(*args , **kwargs )
    def A_ ( self ):
        self.current_tokenizer = self.question_encoder
    def A_ ( self ):
        self.current_tokenizer = self.generator
    def A_ ( self , src_texts , tgt_texts = None , max_length = None , max_target_length = None , padding = "longest" , return_tensors = None , truncation = True , **kwargs , ):
        warnings.warn(
            '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
            'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
            'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
            'details' , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs['labels'] = labels['input_ids']
return model_inputs | 12 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def A_ ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def A_ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def A_ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def A_ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def A_ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def A_ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def A_ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self ):
        _lowerCamelCase : Any = self.prepare_config_and_inputs()
        # unpack into the named pieces that the lines below reference
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = _lowerCamelCase
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
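# Note: the tester above follows the standard transformers test harness: it builds one
# tiny config plus random inputs, and each create_and_check_* method instantiates a
# single head class and asserts only on output shapes.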
@require_tf
class lowerCAmelCase__ ( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    test_onnx = False
def A_ ( self ):
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )
def A_ ( self ):
self.config_tester.run_common_tests()
    def A_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def A_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def A_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def A_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def A_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def A_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def A_ ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config , 'use_cache' ):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            model = model_class(config )
            num_out = len(model(class_inputs_dict ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True )
                saved_model_dir = os.path.join(tmpdirname , 'saved_model' , '1' )
                model = tf.keras.models.load_model(saved_model_dir )
                outputs = model(class_inputs_dict )
                if self.is_encoder_decoder:
                    output_hidden_states = outputs['encoder_hidden_states']
                    output_attentions = outputs['encoder_attentions']
                else:
                    output_hidden_states = outputs['hidden_states']
                    output_attentions = outputs['attentions']
                self.assertEqual(len(outputs ) , num_out )
                expected_num_layers = getattr(
                    self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(output_hidden_states ) , expected_num_layers )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
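                # ConvBERT devotes half of its attention heads to the span-based dynamic
                # convolution branch (head_ratio=2 by default), hence the
                # num_attention_heads / 2 in the attention shape checks above.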
@slow
def A_ ( self ):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
        self.assertIsNotNone(model )
def A_ ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester , 'key_length' , decoder_seq_length )
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2 , 0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self ):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 ) | 369 |
"""simple docstring"""
def solution( lowercase__ = 10 ):
    if not isinstance(lowercase__ , int ) or lowercase__ < 0:
        raise ValueError('Invalid input' )
    modulus = 10**lowercase__
    # pow(base, exp, mod) is modular exponentiation, so the huge power never materialises
    number = 28433 * (pow(2 , 7830457 , modulus )) + 1
    return str(number % modulus )
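# Worked example: solution(5) == str((28433 * pow(2, 7830457, 10**5) + 1) % 10**5),
# i.e. the last five digits of the Project Euler 97 prime, computed without ever
# building the multi-million-digit power itself.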
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(10) = }") | 12 | 0 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
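    # Note: `replicate` copies the pipeline params onto every local device and `shard`
    # splits the batch leading dimension across them, which is what the pmapped
    # (jit=True) pipeline calls in the tests below expect.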
@slow
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self ):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloat16 , )
        prompt = 'A painting of a squirrel eating a burger'
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A_ ( self ):
        model_id = 'stabilityai/stable-diffusion-2'
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id , subfolder='scheduler' )
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , revision='bf16' , dtype=jnp.bfloat16 , )
        params['scheduler'] = scheduler_params
        prompt = 'A painting of a squirrel eating a burger'
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 | 370 |
"""simple docstring"""
import argparse
import datetime
def zeller( date_input ):
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError('Must be 10 characters long' )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12' )
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31' )
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?' )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.3_9 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
    # Response
    response = f'''Your date {date_input}, is a {days[str(f )]}!'''
    return response
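# Worked example: zeller('01-31-2010') -> "Your date 01-31-2010, is a Sunday!"
# (consistent with datetime.date(2010, 1, 31).weekday() == 6, i.e. Sunday).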
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
lowercase__ = parser.parse_args()
zeller(args.date_input) | 12 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase__ = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 371 |
"""simple docstring"""
import re
def indian_phone_validator( lowercase__ ):
    # optional "+91" / "0" / "91" prefix, then ten digits starting with 7, 8 or 9
    _lowerCamelCase : Optional[int] = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' )
    if match := re.search(_lowerCamelCase , lowercase__ ):
        return match.string == lowercase__
    return False
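# The pattern accepts an optional "+91", "0" or "91" prefix followed by a ten-digit
# number starting with 7, 8 or 9, e.g. indian_phone_validator("9876543210") -> True.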
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895""")) | 12 | 0 |
"""simple docstring"""
def _snake_case ( lowercase__ ):
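    """
    Reverse the order of the words in a sentence.

    >>> _snake_case('Hello World')
    'World Hello'
    """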
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod() | 350 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowercase__ = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
lowercase__ = """zero2"""
lowercase__ = """zero3"""
lowercase__ = [ZEROa, ZEROa]
def _snake_case ( func , param_num , param ):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x ) for x in param.args ) )
    return f'''{func.__name__}_{param_based_name}'''
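# With the name_func above, parameterized emits sub-test names like
# "test_..._zero2_base" and "test_..._zero3_robust", so both the ZeRO stage and the
# model key show up in failure reports.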
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowerCAmelCase__ ( TestCasePlus ):
'''simple docstring'''
    @parameterized.expand(params , name_func=_snake_case )
    def A_ ( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=False , fpaa=False , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=_snake_case )
    def A_ ( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=True , fpaa=False , )
    @parameterized.expand(params , name_func=_snake_case )
    def A_ ( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=False , fpaa=True , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=_snake_case )
    def A_ ( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=True , fpaa=True , )
def A_ ( self , lowercase ):
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
    def A_ ( self , stage , model , eval_steps = 10 , distributed = True , fpaa = True , quality_checks = True , ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage , model_name=model_name , eval_steps=eval_steps , num_train_epochs=1 , distributed=distributed , fpaa=fpaa , )
        self.do_checks(output_dir )
        return output_dir
    def A_ ( self , stage , model_name , eval_steps = 10 , num_train_epochs = 1 , distributed = True , fpaa = True , ):
        output_dir = self.get_auto_remove_tmp_dir('./xxx' , after=False )
        args = F'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
        ds_args = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
        script = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
        launcher = self.get_launcher(distributed )
        cmd = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd , env=self.get_env() )
return output_dir
    def A_ ( self , distributed=False ):
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
        num_gpus = min(2 , get_gpu_count() ) if distributed else 1
return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split() | 12 | 0 |
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowercase__ = TypeVar("""T""")
def _snake_case ( lowercase__ ):
return (position - 1) // 2
def _snake_case ( lowercase__ ):
return (2 * position) + 1
def _snake_case ( lowercase__ ):
return (2 * position) + 2
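# For a binary heap stored in a flat list, the children of index i live at 2*i + 1 and
# 2*i + 2 and the parent at (i - 1) // 2; the three helpers above encode exactly that.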
class MinPriorityQueue ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
        self.heap : list[tuple[T, int]] = []
        self.position_map : dict[T, int] = {}
        self.elements : int = 0
def __len__( self ):
return self.elements
def __repr__( self ):
return str(self.heap )
def A_ ( self ):
# Check if the priority queue is empty
return self.elements == 0
    def A_ ( self , elem , weight ):
        # Add an element with given priority to the queue
        self.heap.append((elem, weight) )
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem )
    def A_ ( self ):
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem )
        return elem
    def A_ ( self , elem , weight ):
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position )
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem )
            else:
                self._bubble_down(elem )
        else:
            self._bubble_down(elem )
    def A_ ( self , elem ):
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos )
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position , curr_pos )
            return self._bubble_up(elem )
        return None
    def A_ ( self , elem ):
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos )
        child_right_position = get_child_right_position(curr_pos )
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position , curr_pos )
                return self._bubble_down(elem )
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        return None
    def A_ ( self , nodea_pos , nodea_pos_a ):
        # Swap the nodes at the given positions
        nodea_elem = self.heap[nodea_pos][0]
        nodea_elem_a = self.heap[nodea_pos_a][0]
        self.heap[nodea_pos], self.heap[nodea_pos_a] = (
            self.heap[nodea_pos_a],
            self.heap[nodea_pos],
        )
        self.position_map[nodea_elem] = nodea_pos_a
        self.position_map[nodea_elem_a] = nodea_pos
class GraphUndirectedWeighted ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
        self.connections : dict[T, dict[T, int]] = {}
        self.nodes : int = 0
def __repr__( self ):
return str(self.connections )
def __len__( self ):
return self.nodes
    def A_ ( self , node ):
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def A_ ( self , nodea , nodea_a , weight ):
        # Add an edge between 2 nodes in the graph
        self.add_node(nodea )
        self.add_node(nodea_a )
        self.connections[nodea][nodea_a] = weight
        self.connections[nodea_a][nodea] = weight
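# The function below builds the tree Prim-style: pop the vertex with the smallest key
# from the min-priority queue, then lower its neighbours' keys via update_key
# (decrease-key); overall O(E log V) with the heap above.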
def _snake_case ( graph , ):
    dist : dict[T, int] = {node: maxsize for node in graph.connections}
    parent : dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue : MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node , weight )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour , dist[neighbour] )
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour , dist[neighbour] )
                parent[neighbour] = node
return dist, parent | 351 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_rescale = True , rescale_factor = 1 / 255 , do_pad = True , pad_size = 8 , **kwargs , ):
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def A_ ( self , lowercase , scale , data_format = None , **kwargs ):
        return rescale(lowercase , scale=scale , data_format=data_format , **kwargs )
    def A_ ( self , lowercase , size , data_format = None ):
        old_height, old_width = get_image_size(lowercase )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(lowercase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=data_format )
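    # e.g. with size=8 an input of height 21 gets (21 // 8 + 1) * 8 - 21 = 3 rows of
    # symmetric padding, making the padded height (24) a multiple of 8; note an exact
    # multiple still gains one full extra block.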
    def A_ ( self , images , do_rescale = None , rescale_factor = None , do_pad = None , pad_size = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors ) | 12 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = """▁"""
lowercase__ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowercase__ = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
lowercase__ = {
"""facebook/m2m100_418M""": 1024,
}
# fmt: off
lowercase__ = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens = []
    suffix_tokens = []
def __init__( self , lowercase , lowercase , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<pad>" , lowercase="<unk>" , lowercase="m2m100" , lowercase = None , lowercase=8 , **lowercase , ):
_lowerCamelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = language_codes
_lowerCamelCase : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES[language_codes]
_lowerCamelCase : List[str] = {lang_code: F'''__{lang_code}__''' for lang_code in fairseq_language_code}
_lowerCamelCase : Tuple = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(lowercase )
for lang_code in fairseq_language_code
if self.get_lang_token(lowercase ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowercase , tgt_lang=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , unk_token=lowercase , pad_token=lowercase , language_codes=lowercase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=lowercase , **lowercase , )
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : Union[str, Any] = load_json(lowercase )
_lowerCamelCase : Any = {v: k for k, v in self.encoder.items()}
_lowerCamelCase : Tuple = spm_file
_lowerCamelCase : List[str] = load_spm(lowercase , self.sp_model_kwargs )
_lowerCamelCase : str = len(self.encoder )
_lowerCamelCase : List[str] = {
self.get_lang_token(lowercase ): self.encoder_size + i for i, lang_code in enumerate(lowercase )
}
_lowerCamelCase : Optional[Any] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(lowercase )}
_lowerCamelCase : Tuple = {v: k for k, v in self.lang_token_to_id.items()}
_lowerCamelCase : Tuple = src_lang if src_lang is not None else 'en'
_lowerCamelCase : Any = tgt_lang
_lowerCamelCase : Tuple = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_lowerCamelCase : List[Any] = num_madeup_words
@property
def A_ ( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def A_ ( self ):
return self._src_lang
@src_lang.setter
def A_ ( self , lowercase ):
_lowerCamelCase : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A_ ( self , lowercase ):
return self.sp_model.encode(lowercase , out_type=lowercase )
def A_ ( self , lowercase ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(lowercase , self.encoder[self.unk_token] )
def A_ ( self , lowercase ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(lowercase , self.unk_token )
def A_ ( self , lowercase ):
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : List[str] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase ) + token
_lowerCamelCase : List[str] = []
else:
current_sub_tokens.append(lowercase )
out_string += self.sp_model.decode(lowercase )
return out_string.strip()
def A_ ( self , lowercase , lowercase = None , lowercase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
_lowerCamelCase : Optional[Any] = [1] * len(self.prefix_tokens )
_lowerCamelCase : Tuple = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase )) + suffix_ones
return prefix_ones + ([0] * len(lowercase )) + ([0] * len(lowercase )) + suffix_ones
def A_ ( self , lowercase , lowercase = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
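    # M2M100 sequence format: [src_lang_code] + token_ids (+ second segment) + [eos];
    # the prefix/suffix lists are refreshed by the set_*_lang_special_tokens helpers below.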
def A_ ( self ):
_lowerCamelCase : Optional[Any] = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCamelCase : str = self.__dict__.copy()
_lowerCamelCase : List[Any] = None
return state
def __setstate__( self , lowercase ):
_lowerCamelCase : Dict = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : int = load_spm(self.spm_file , self.sp_model_kwargs )
def A_ ( self , lowercase , lowercase = None ):
_lowerCamelCase : str = Path(lowercase )
if not save_dir.is_dir():
raise OSError(F'''{save_directory} should be a directory''' )
_lowerCamelCase : Dict = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
_lowerCamelCase : int = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , lowercase )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , lowercase )
elif not os.path.isfile(self.spm_file ):
with open(lowercase , 'wb' ) as fi:
_lowerCamelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (str(lowercase ), str(lowercase ))
def A_ ( self , lowercase , lowercase = "en" , lowercase = None , lowercase = "ro" , **lowercase , ):
_lowerCamelCase : Tuple = src_lang
_lowerCamelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase , **lowercase ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCamelCase : List[Any] = src_lang
_lowerCamelCase : Dict = self(lowercase , add_special_tokens=lowercase , **lowercase )
_lowerCamelCase : List[Any] = self.get_lang_id(lowercase )
_lowerCamelCase : Optional[Any] = tgt_lang_id
return inputs
def A_ ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def A_ ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def A_ ( self , lowercase ):
_lowerCamelCase : Optional[Any] = self.get_lang_token(lowercase )
_lowerCamelCase : Optional[Any] = self.lang_token_to_id[lang_token]
_lowerCamelCase : Dict = [self.cur_lang_id]
_lowerCamelCase : List[str] = [self.eos_token_id]
def A_ ( self , lowercase ):
_lowerCamelCase : List[Any] = self.get_lang_token(lowercase )
_lowerCamelCase : List[Any] = self.lang_token_to_id[lang_token]
_lowerCamelCase : Optional[Any] = [self.cur_lang_id]
_lowerCamelCase : Optional[int] = [self.eos_token_id]
def A_ ( self , lowercase ):
return self.lang_code_to_token[lang]
def A_ ( self , lowercase ):
_lowerCamelCase : int = self.get_lang_token(lowercase )
return self.lang_token_to_id[lang_token]
def _snake_case ( path , sp_model_kwargs ):
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def _snake_case ( lowercase__ ):
with open(lowercase__ , 'r' ) as f:
        return json.load(f )
def _snake_case ( lowercase__ , path ):
    with open(path , 'w' ) as f:
        json.dump(lowercase__ , f , indent=2 ) | 352 |
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
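# Arrow keys are stored with bit 8 set (ARROW_KEY_FLAG) so their codes can never
# collide with single-byte characters; the reader below strips and re-adds the flag
# while decoding escape sequences.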
KEYMAP["""arrow_begin"""] = KEYMAP["""up"""]
KEYMAP["""arrow_end"""] = KEYMAP["""left"""]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    if os.name == "nt":
        import msvcrt
        encoding = 'mbcs'
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP['esc'] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def _snake_case ( ):
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        char = get_raw_chars()
        if ord(char ) == KEYMAP["mod_int"]:
            char = get_raw_chars()
            if ord(char ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(char ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(char ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"] | 12 | 0 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys( flax_key_tuple , flax_tensor ):
    """Rename the keys of the base flax model, transposing weights where needed."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict( layer , checkpoint_info , switch_checkpoint_path ):
    """Resolve a flattened checkpoint key into its real layer name plus tensorstore spec."""
    if "metadata" in layer:
        split_layer = layer.split('metadata' )
        curr_real_layer_name = ''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('metadata' + split_layer[1]).split('/' ) )]
    elif "kvstore" in layer:
        split_layer = layer.split('kvstore' )
        curr_real_layer_name = ''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
    else:
        split_layer = layer.split('/' )
        curr_real_layer_name = '/'.join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
    elif "kvstore/driver" in layer:
        content = 'file'
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block( current_block , save_path ):
    """Apply the key renames and persist one shard to disk."""
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace('/' , '.' )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly( switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name = WEIGHTS_NAME ):
    """Stream the T5X checkpoint tensor by tensor and write transformers-style shards."""
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )['optimizer']['target']
        checkpoint_info = flatten_dict(checkpoint_info , sep='/' )
    all_layers = {}
for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : List[str] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : Any = torch.tensor(lowercase__ )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase, _lowerCamelCase : Tuple = rename_base_flax_keys(tuple(key.split('/' ) ) , lowercase__ )
_lowerCamelCase : List[str] = '/'.join(lowercase__ )
        # If this weight is going to tip the current shard over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Dict = os.path.join(
lowercase__ , weights_name.replace('.bin' , f'''-{len(lowercase__ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(lowercase__ , lowercase__ )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : int = 0
_lowerCamelCase : Dict = raw_weights.to(getattr(lowercase__ , lowercase__ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : str = os.path.join(lowercase__ , weights_name.replace('.bin' , f'''-{len(lowercase__ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(lowercase__ , lowercase__ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowercase__ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : int = {}
for idx, shard in enumerate(lowercase__ ):
_lowerCamelCase : str = weights_name.replace(
'.bin' , f'''-{idx+1:05d}-of-{len(lowercase__ ):05d}.bin''' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : int = os.path.join(lowercase__ , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(lowercase__ , os.path.join(lowercase__ , lowercase__ ) )
_lowerCamelCase : List[str] = shard
for key in shard:
_lowerCamelCase : List[str] = shard_file
# Add the metadata
_lowerCamelCase : str = {'total_size': total_size}
_lowerCamelCase : Any = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(lowercase__ , lowercase__ ) , 'w' , encoding='utf-8' ) as f:
_lowerCamelCase : Optional[Any] = json.dumps(lowercase__ , indent=2 , sort_keys=lowercase__ ) + '\n'
f.write(lowercase__ )
return metadata, index
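# Hedged sketch of the size accounting used in the sharding function above: a
# tensor contributes numel * bytes-per-element to the running shard size, and a
# new shard is opened once adding it would exceed `max_shard_size`. The numbers
# below are illustrative only.
def _demo_shard_accounting():
    import torch
    from transformers.modeling_utils import dtype_byte_size
    from transformers.utils.hub import convert_file_size_to_int

    max_shard_size = convert_file_size_to_int("10GB")  # -> 10 * 10**9 bytes
    tensor = torch.zeros(1024, 1024, dtype=torch.bfloat16)
    weight_size = tensor.numel() * dtype_byte_size(tensor.dtype)  # 2 MiB
    return weight_size < max_shard_size  # True: fits in the current shard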
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
lowercase__ = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def _snake_case ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_lowerCamelCase : Tuple = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
_lowerCamelCase : Optional[Any] = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
_lowerCamelCase : List[str] = TaTokenizer.from_pretrained('t5-small' )
_lowerCamelCase : Tuple = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
_lowerCamelCase : List[Any] = tokenizer(lowercase__ , return_tensors='pt' ).input_ids
_lowerCamelCase : Optional[Any] = model.generate(lowercase__ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 353 |
"""simple docstring"""
from typing import Any
def _snake_case ( lowercase__ ):
if not input_list:
return []
_lowerCamelCase : Any = [input_list.count(lowercase__ ) for value in input_list]
_lowerCamelCase : Dict = max(lowercase__ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowercase__ ) if value == y} )
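# Hedged readable restatement of the multimode logic above (the original
# identifiers were machine-mangled); this is an assumption about its intent,
# not part of the source file.
def _demo_modes():
    from collections import Counter

    def modes(values):
        if not values:
            return []
        counts = Counter(values)
        top = max(counts.values())
        return sorted(v for v, c in counts.items() if c == top)

    assert modes([2, 2, 3, 3, 4]) == [2, 3]  # both 2 and 3 occur twice
    return modes([1, 1, 2])  # -> [1]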
if __name__ == "__main__":
import doctest
doctest.testmod() | 12 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ = '''upernet'''
def __init__( self , lowercase=None , lowercase=512 , lowercase=0.02 , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=384 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ):
super().__init__(**UpperCamelCase_ )
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_lowerCamelCase : int = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowerCamelCase : Optional[Any] = backbone_config.get('model_type' )
_lowerCamelCase : int = CONFIG_MAPPING[backbone_model_type]
_lowerCamelCase : Optional[Any] = config_class.from_dict(UpperCamelCase_ )
_lowerCamelCase : List[Any] = backbone_config
_lowerCamelCase : Tuple = hidden_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : str = pool_scales
_lowerCamelCase : Dict = use_auxiliary_head
_lowerCamelCase : List[str] = auxiliary_loss_weight
_lowerCamelCase : Optional[int] = auxiliary_in_channels
_lowerCamelCase : int = auxiliary_channels
_lowerCamelCase : Union[str, Any] = auxiliary_num_convs
_lowerCamelCase : Union[str, Any] = auxiliary_concat_input
_lowerCamelCase : str = loss_ignore_index
def A_ ( self ):
_lowerCamelCase : List[str] = copy.deepcopy(self.__dict__ )
_lowerCamelCase : Optional[int] = self.backbone_config.to_dict()
_lowerCamelCase : Any = self.__class__.model_type
return output | 354 |
"""simple docstring"""
def _snake_case ( lowercase__ ):
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
_lowerCamelCase : List[str] = len(lowercase__ )
_lowerCamelCase : List[str] = max(lowercase__ )
_lowerCamelCase : List[str] = min(lowercase__ )
# create the counting array
_lowerCamelCase : List[Any] = coll_max + 1 - coll_min
_lowerCamelCase : List[Any] = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
for i in range(1 , lowercase__ ):
_lowerCamelCase : Optional[int] = counting_arr[i] + counting_arr[i - 1]
# create the output collection
_lowerCamelCase : Dict = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , lowercase__ ) ):
_lowerCamelCase : Any = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def _snake_case ( lowercase__ ):
return "".join([chr(lowercase__ ) for i in counting_sort([ord(lowercase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
lowercase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase__ = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted)) | 12 | 0 |
"""simple docstring"""
import argparse
import copy
def _snake_case ( lowercase__ ):
_lowerCamelCase : Union[str, Any] = {}
with open(lowerCamelCase_ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
_lowerCamelCase : Dict = []
_list.append([line.split()[1], line.split()[2]] )
_lowerCamelCase : List[str] = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
_lowerCamelCase : str = []
_list.append([line.split()[0], line.split()[2]] )
_lowerCamelCase : Union[str, Any] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
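# Hedged note on the expected input (inferred from the parsing above, not from
# the original file): each line is assumed to be an undirected weighted edge,
# "<node_a> <node_b> <distance>", e.g.
#   a b 20
#   a c 18
#   b c 10
# so `dict_of_neighbours` maps every node to a list of [neighbour, distance]
# pairs.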
def _snake_case ( lowercase__ , lowercase__ ):
with open(lowerCamelCase_ ) as f:
_lowerCamelCase : Any = f.read(1 )
_lowerCamelCase : Dict = start_node
_lowerCamelCase : Dict = []
_lowerCamelCase : List[Any] = start_node
_lowerCamelCase : List[str] = 0
while visiting not in first_solution:
_lowerCamelCase : Dict = 10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(lowerCamelCase_ ) and k[0] not in first_solution:
_lowerCamelCase : int = k[1]
_lowerCamelCase : Optional[int] = k[0]
first_solution.append(lowerCamelCase_ )
_lowerCamelCase : Tuple = distance_of_first_solution + int(lowerCamelCase_ )
_lowerCamelCase : str = best_node
first_solution.append(lowerCamelCase_ )
_lowerCamelCase : List[Any] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
_lowerCamelCase : Union[str, Any] = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : Union[str, Any] = []
for n in solution[1:-1]:
_lowerCamelCase : Optional[Any] = solution.index(lowerCamelCase_ )
for kn in solution[1:-1]:
_lowerCamelCase : Any = solution.index(lowerCamelCase_ )
if n == kn:
continue
_lowerCamelCase : int = copy.deepcopy(lowerCamelCase_ )
_lowerCamelCase : List[Any] = kn
_lowerCamelCase : Any = n
_lowerCamelCase : List[Any] = 0
for k in _tmp[:-1]:
_lowerCamelCase : str = _tmp[_tmp.index(lowerCamelCase_ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
_lowerCamelCase : List[Any] = distance + int(i[1] )
_tmp.append(lowerCamelCase_ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
_lowerCamelCase : List[Any] = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = first_solution
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Optional[Any] = distance_of_first_solution
_lowerCamelCase : Optional[Any] = solution
while count <= iters:
_lowerCamelCase : Union[str, Any] = find_neighborhood(lowerCamelCase_ , lowerCamelCase_ )
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = neighborhood[index_of_best_solution]
_lowerCamelCase : int = len(lowerCamelCase_ ) - 1
_lowerCamelCase : int = False
while not found:
_lowerCamelCase : Any = 0
while i < len(lowerCamelCase_ ):
if best_solution[i] != solution[i]:
_lowerCamelCase : List[str] = best_solution[i]
_lowerCamelCase : List[Any] = solution[i]
break
_lowerCamelCase : int = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Dict = best_solution[:-1]
_lowerCamelCase : Optional[int] = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
_lowerCamelCase : List[str] = cost
_lowerCamelCase : Union[str, Any] = solution
else:
_lowerCamelCase : Optional[Any] = index_of_best_solution + 1
_lowerCamelCase : Union[str, Any] = neighborhood[index_of_best_solution]
if len(lowerCamelCase_ ) >= size:
tabu_list.pop(0 )
_lowerCamelCase : Optional[int] = count + 1
return best_solution_ever, best_cost
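# Hedged summary of the loop above: each iteration moves to the best neighbour
# whose swapped city pair is not on the tabu list, records that pair (evicting
# the oldest entry once the list reaches `size`), and keeps the best tour seen
# overall -- the classic short-term-memory tabu search scheme.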
def _snake_case ( lowercase__=None ):
_lowerCamelCase : Union[str, Any] = generate_neighbours(args.File )
_lowerCamelCase : List[str] = generate_first_solution(
args.File , lowerCamelCase_ )
_lowerCamelCase : List[Any] = tabu_search(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , args.Iterations , args.Size , )
print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args()) | 355 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
lowercase__ = parser.parse_args()
lowercase__ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 12 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=18 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , ):
_lowerCamelCase : int = size if size is not None else {'height': 18, 'width': 18}
_lowerCamelCase : List[str] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = num_channels
_lowerCamelCase : Dict = image_size
_lowerCamelCase : int = min_resolution
_lowerCamelCase : Tuple = max_resolution
_lowerCamelCase : List[str] = do_resize
_lowerCamelCase : Dict = size
_lowerCamelCase : Dict = do_normalize
def A_ ( self ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
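# Hedged background sketch (not from the test file; values are illustrative):
# ImageGPT preprocessing maps each RGB pixel, normalized to [-1, 1], to the
# index of its nearest colour cluster, turning an image into a token sequence.
def _demo_nearest_cluster():
    import numpy as np

    clusters = np.asarray([[0.9, 0.7, 0.4], [-0.6, -0.02, 0.5]])  # two toy centroids
    pixel = np.asarray([0.8, 0.6, 0.3])
    distances = ((clusters - pixel) ** 2).sum(axis=-1)  # squared L2 per centroid
    return int(distances.argmin())  # -> 0, the closer centroid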
@require_torch
@require_vision
class lowerCAmelCase__ ( lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = ImageGPTImageProcessor if is_vision_available() else None
def A_ ( self ):
_lowerCamelCase : Dict = ImageGPTImageProcessingTester(self )
@property
def A_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'clusters' ) )
self.assertTrue(hasattr(_A , 'do_resize' ) )
self.assertTrue(hasattr(_A , 'size' ) )
self.assertTrue(hasattr(_A , 'do_normalize' ) )
def A_ ( self ):
_lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
_lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def A_ ( self ):
_lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
_lowerCamelCase : Optional[int] = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(_A , obj[key] ) )
else:
self.assertEqual(obj[key] , _A )
def A_ ( self ):
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : List[Any] = os.path.join(_A , 'image_processor.json' )
image_processor_first.to_json_file(_A )
_lowerCamelCase : Optional[Any] = self.image_processing_class.from_json_file(_A ).to_dict()
_lowerCamelCase : Optional[int] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_A , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _A )
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(_A )
_lowerCamelCase : Optional[int] = self.image_processing_class.from_pretrained(_A ).to_dict()
_lowerCamelCase : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_A , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _A )
@unittest.skip('ImageGPT requires clusters at initialization' )
def A_ ( self ):
pass
def _snake_case ( ):
_lowerCamelCase : Tuple = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
_lowerCamelCase : List[str] = Image.open(dataset[4]['file'] )
_lowerCamelCase : List[Any] = Image.open(dataset[5]['file'] )
_lowerCamelCase : Any = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self ):
_lowerCamelCase : Optional[int] = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
_lowerCamelCase : Union[str, Any] = prepare_images()
# test non-batched
_lowerCamelCase : List[Any] = image_processing(images[0] , return_tensors='pt' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
_lowerCamelCase : int = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , _A )
# test batched
_lowerCamelCase : List[str] = image_processing(_A , return_tensors='pt' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
_lowerCamelCase : Any = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , _A ) | 356 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = (UnCLIPScheduler,)
def A_ ( self , **lowercase ):
_lowerCamelCase : Any = {
'num_train_timesteps': 1000,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**lowercase )
return config
def A_ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase )
def A_ ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowercase )
def A_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase )
def A_ ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowercase )
def A_ ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowercase )
def A_ ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowercase , prev_timestep=lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' )
_lowerCamelCase : str = scheduler_class(**lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5
def A_ ( self ):
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' )
_lowerCamelCase : int = scheduler_class(**lowercase )
_lowerCamelCase : List[str] = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5
def A_ ( self ):
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config()
_lowerCamelCase : Tuple = scheduler_class(**lowercase )
_lowerCamelCase : Union[str, Any] = scheduler.timesteps
_lowerCamelCase : Any = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
_lowerCamelCase : Optional[int] = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : Tuple = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : Optional[int] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def A_ ( self ):
_lowerCamelCase : Tuple = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Optional[Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(25 )
_lowerCamelCase : Optional[Any] = scheduler.timesteps
_lowerCamelCase : Optional[int] = self.dummy_model()
_lowerCamelCase : Any = self.dummy_sample_deter
_lowerCamelCase : str = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : List[Any] = model(lowercase , lowercase )
if i + 1 == timesteps.shape[0]:
_lowerCamelCase : Optional[int] = None
else:
_lowerCamelCase : List[str] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Union[str, Any] = scheduler.step(
lowercase , lowercase , lowercase , prev_timestep=lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : List[Any] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def A_ ( self ):
pass
def A_ ( self ):
pass | 12 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowercase__ = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def _snake_case ( lowercase__ ):
if isinstance(_A , torch.Tensor ):
return image
elif isinstance(_A , PIL.Image.Image ):
_lowerCamelCase : str = [image]
_lowerCamelCase : Dict = [trans(img.convert('RGB' ) ) for img in image]
_lowerCamelCase : Optional[int] = torch.stack(_A )
return image
class lowerCAmelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
def __init__( self , lowercase , lowercase ):
super().__init__()
# make sure scheduler can always be converted to DDIM
_lowerCamelCase : str = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__snake_case , scheduler=__snake_case )
def A_ ( self , lowercase ):
if strength < 0 or strength > 1:
            raise ValueError(F'''The value of strength should be in [0.0, 1.0] but is {strength}''' )
def A_ ( self , lowercase , lowercase , lowercase ):
# get the original timestep using init_timestep
_lowerCamelCase : Union[str, Any] = min(int(num_inference_steps * strength ) , __snake_case )
_lowerCamelCase : str = max(num_inference_steps - init_timestep , 0 )
_lowerCamelCase : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
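    # Hedged worked example of the truncation above (illustrative numbers): with
    # num_inference_steps=50 and strength=0.8, init_timestep = 40 and t_start = 10,
    # so only the last 40 scheduler timesteps are actually denoised.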
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None ):
if not isinstance(__snake_case , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__snake_case )}''' )
_lowerCamelCase : Union[str, Any] = image.to(device=__snake_case , dtype=__snake_case )
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__snake_case )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
_lowerCamelCase : List[Any] = init_latents.shape
_lowerCamelCase : str = randn_tensor(__snake_case , generator=__snake_case , device=__snake_case , dtype=__snake_case )
# get latents
print('add noise to latents at timestep' , __snake_case )
_lowerCamelCase : List[Any] = self.scheduler.add_noise(__snake_case , __snake_case , __snake_case )
_lowerCamelCase : Dict = init_latents
return latents
@torch.no_grad()
def __call__( self , lowercase = None , lowercase = 0.8 , lowercase = 1 , lowercase = None , lowercase = 0.0 , lowercase = 50 , lowercase = None , lowercase = "pil" , lowercase = True , ):
self.check_inputs(__snake_case )
# 2. Preprocess image
_lowerCamelCase : Tuple = preprocess(__snake_case )
# 3. set timesteps
self.scheduler.set_timesteps(__snake_case , device=self.device )
_lowerCamelCase : Optional[Any] = self.get_timesteps(__snake_case , __snake_case , self.device )
_lowerCamelCase : List[str] = timesteps[:1].repeat(__snake_case )
# 4. Prepare latent variables
_lowerCamelCase : List[str] = self.prepare_latents(__snake_case , __snake_case , __snake_case , self.unet.dtype , self.device , __snake_case )
_lowerCamelCase : Optional[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(__snake_case ):
# 1. predict noise model_output
_lowerCamelCase : List[Any] = self.unet(__snake_case , __snake_case ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_lowerCamelCase : Optional[int] = self.scheduler.step(
__snake_case , __snake_case , __snake_case , eta=__snake_case , use_clipped_model_output=__snake_case , generator=__snake_case , ).prev_sample
_lowerCamelCase : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=__snake_case ) | 357 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """data2vec-audio"""
def __init__( self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=16 , lowercase=19 , lowercase=5 , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=256 , lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
_lowerCamelCase : str = hidden_size
_lowerCamelCase : str = feat_extract_activation
_lowerCamelCase : Optional[Any] = list(lowercase )
_lowerCamelCase : Dict = list(lowercase )
_lowerCamelCase : Dict = list(lowercase )
_lowerCamelCase : Optional[Any] = conv_bias
_lowerCamelCase : Union[str, Any] = num_conv_pos_embeddings
_lowerCamelCase : List[Any] = num_conv_pos_embedding_groups
_lowerCamelCase : List[Any] = conv_pos_kernel_size
_lowerCamelCase : Optional[int] = len(self.conv_dim )
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Any = hidden_dropout
_lowerCamelCase : Union[str, Any] = attention_dropout
_lowerCamelCase : str = activation_dropout
_lowerCamelCase : Any = feat_proj_dropout
_lowerCamelCase : Tuple = final_dropout
_lowerCamelCase : Union[str, Any] = layerdrop
_lowerCamelCase : List[Any] = layer_norm_eps
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : Tuple = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Optional[Any] = mask_time_prob
_lowerCamelCase : List[Any] = mask_time_length
_lowerCamelCase : List[Any] = mask_time_min_masks
_lowerCamelCase : Tuple = mask_feature_prob
_lowerCamelCase : Optional[Any] = mask_feature_length
_lowerCamelCase : Dict = mask_feature_min_masks
# ctc loss
_lowerCamelCase : Tuple = ctc_loss_reduction
_lowerCamelCase : str = ctc_zero_infinity
# adapter
_lowerCamelCase : Union[str, Any] = add_adapter
_lowerCamelCase : List[Any] = adapter_kernel_size
_lowerCamelCase : Optional[Any] = adapter_stride
_lowerCamelCase : List[Any] = num_adapter_layers
_lowerCamelCase : int = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCamelCase : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCamelCase : List[str] = list(lowercase )
_lowerCamelCase : Optional[Any] = list(lowercase )
_lowerCamelCase : Any = list(lowercase )
_lowerCamelCase : Optional[Any] = xvector_output_dim
@property
def A_ ( self ):
return math.prod(self.conv_stride ) | 12 | 0 |
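# Hedged note on the property above (interpretation added for clarity, not from
# the original file): it is the feature extractor's total stride, e.g.
# math.prod((5, 2, 2, 2, 2, 2, 2)) == 320, i.e. one output frame per 320 input
# samples, or 20 ms of audio at 16 kHz.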
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCamelCase__ = ['''torch''', '''scipy''']
def __init__( self , *lowercase , **lowercase ):
requires_backends(self , ['torch', 'scipy'] )
@classmethod
def A_ ( cls , *lowercase , **lowercase ):
requires_backends(cls , ['torch', 'scipy'] )
@classmethod
def A_ ( cls , *lowercase , **lowercase ):
requires_backends(cls , ['torch', 'scipy'] )
| 358 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
lowercase__ = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """facebook/nllb-200-distilled-600M"""
lowerCamelCase__ = (
"""This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
"""be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
"""which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """
"""plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
)
lowerCamelCase__ = """translator"""
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSeqaSeqLM
lowerCamelCase__ = LANGUAGE_CODES
lowerCamelCase__ = ["""text""", """text""", """text"""]
lowerCamelCase__ = ["""text"""]
def A_ ( self , lowercase , lowercase , lowercase ):
if src_lang not in self.lang_to_code:
raise ValueError(F'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'''{tgt_lang} is not a supported language.''' )
_lowerCamelCase : str = self.lang_to_code[src_lang]
_lowerCamelCase : int = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowercase , return_tensors='pt' , src_lang=lowercase , tgt_lang=lowercase )
def A_ ( self , lowercase ):
return self.model.generate(**lowercase )
def A_ ( self , lowercase ):
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowercase ) | 12 | 0 |
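# Hedged usage sketch for the translation tool above (the mangled class name is
# replaced by the hypothetical `TranslationTool`; model weights must be
# downloaded first, and the output shown is illustrative):
#
#   tool = TranslationTool()
#   tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")
#   # -> e.g. "Hello, how are you?"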
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowercase__ = Lock()
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
global process_lock
    # we perform 10 swap phases here, enough for the 10-element list built in
    # main() below; in general a list of length n needs n phases. we *could*
    # stop early if we are sorted already, but it takes as long to find out we
    # are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(lowercase__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
_lowerCamelCase : int = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
_lowerCamelCase : Dict = min(lowercase__ , lowercase__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(lowercase__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
_lowerCamelCase : List[str] = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
_lowerCamelCase : List[Any] = max(lowercase__ , lowercase__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(lowercase__ )
def _snake_case ( lowercase__ ):
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : List[Any] = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
_lowerCamelCase : List[Any] = Pipe()
_lowerCamelCase : Union[str, Any] = Pipe()
process_array_.append(
Process(
target=lowercase__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
_lowerCamelCase : Optional[int] = temp_rs
_lowerCamelCase : str = temp_rr
for i in range(1 , len(lowercase__ ) - 1 ):
_lowerCamelCase : Any = Pipe()
_lowerCamelCase : List[Any] = Pipe()
process_array_.append(
Process(
target=lowercase__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
_lowerCamelCase : List[Any] = temp_rs
_lowerCamelCase : Union[str, Any] = temp_rr
process_array_.append(
Process(
target=lowercase__ , args=(
len(lowercase__ ) - 1,
arr[len(lowercase__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(lowercase__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(lowercase__ ) ):
_lowerCamelCase : str = result_pipe[p][0].recv()
process_array_[p].join()
return arr
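def _demo_sequential_odd_even_sort(values):
    # Hedged sequential reference for the parallel version above: n alternating
    # even/odd comparison phases sort any list of length n. Added for clarity;
    # not part of the original file.
    arr = list(values)
    n = len(arr)
    for phase in range(n):
        for i in range(phase % 2, n - 1, 2):  # even pairs, then odd pairs
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr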
def _snake_case ( ):
_lowerCamelCase : Union[str, Any] = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*lowercase__ )
_lowerCamelCase : Union[str, Any] = odd_even_transposition(lowercase__ )
print('Sorted List\n' )
print(*lowercase__ )
if __name__ == "__main__":
main() | 359 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def A_ ( self , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
_lowerCamelCase : Tuple = VideoClassificationPipeline(model=lowercase , image_processor=lowercase , top_k=2 )
_lowerCamelCase : List[str] = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def A_ ( self , lowercase , lowercase ):
for example in examples:
_lowerCamelCase : Tuple = video_classifier(lowercase )
self.assertEqual(
lowercase , [
{'score': ANY(lowercase ), 'label': ANY(lowercase )},
{'score': ANY(lowercase ), 'label': ANY(lowercase )},
] , )
@require_torch
def A_ ( self ):
_lowerCamelCase : Optional[Any] = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
_lowerCamelCase : Tuple = VideoMAEFeatureExtractor(
size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} )
_lowerCamelCase : Dict = pipeline(
'video-classification' , model=lowercase , feature_extractor=lowercase , frame_sampling_rate=4 )
_lowerCamelCase : Any = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
_lowerCamelCase : Dict = video_classifier(lowercase , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}] , )
_lowerCamelCase : str = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
] , )
@require_tf
def A_ ( self ):
pass | 12 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : List[Any] = inspect.getfile(accelerate.test_utils )
_lowerCamelCase : Any = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
_lowerCamelCase : int = test_metrics
@require_cpu
def A_ ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def A_ ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def A_ ( self ):
self.test_metrics.main()
@require_multi_gpu
def A_ ( self ):
print(F'''Found {torch.cuda.device_count()} devices.''' )
_lowerCamelCase : List[Any] = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() ) | 360 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase__ = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 12 | 0 |
"""simple docstring"""
def _snake_case ( lowercase__ , lowercase__ ):
if not len(__snake_case ) == len(__snake_case ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = equationa
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = equationa
# Calculate the determinants of the matrices
_lowerCamelCase : List[str] = aa * ba - aa * ba
_lowerCamelCase : int = ca * ba - ca * ba
_lowerCamelCase : List[str] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
            # Trivial solution (Consistent system)
return (0.0, 0.0)
else:
_lowerCamelCase : Union[str, Any] = determinant_x / determinant
_lowerCamelCase : Any = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
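def _demo_cramer_rule():
    # Hedged standalone worked example of Cramer's rule with readable names
    # (the identifiers in the function above were machine-mangled). For
    #   1x + 2y = 3
    #   2x + 1y = 3
    # D = a1*b2 - a2*b1 = -3, Dx = c1*b2 - c2*b1 = -3, Dy = a1*c2 - a2*c1 = -3,
    # so x = Dx / D = 1.0 and y = Dy / D = 1.0.
    a1, b1, c1 = 1.0, 2.0, 3.0
    a2, b2, c2 = 2.0, 1.0, 3.0
    d = a1 * b2 - a2 * b1
    x = (c1 * b2 - c2 * b1) / d
    y = (a1 * c2 - a2 * c1) / d
    assert (x, y) == (1.0, 1.0)
    return x, y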
| 361 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def _snake_case ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ):
if attention_mask is None:
_lowerCamelCase : List[str] = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class lowerCAmelCase__ :
'''simple docstring'''
lowerCamelCase__ = OPTConfig
lowerCamelCase__ = {}
lowerCamelCase__ = """gelu"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
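
    # Note (added): the check above is the standard KV-cache consistency test --
    # the same continuation tokens are scored once from scratch and once with
    # cached past_key_values, and the logits must match on a random slice.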
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
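
    # Added sketch (illustrative, not from the source) of what the resize above
    # is expected to preserve when growing the vocab from V to V + 10 rows:
    #   old = model.get_input_embeddings().weight      # shape (V, H)
    #   model.resize_token_embeddings(V + 10)
    #   new = model.get_input_embeddings().weight      # shape (V + 10, H)
    #   # rows 0..V-1 of `new` equal `old`; only the 10 new rows are re-initialized.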
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    '''simple docstring'''

    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained('facebook/opt-350m')
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
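
    # Note (added): the XLA path re-runs the same forward pass compiled with
    # jit_compile=True; the looser tolerance (4e-2 vs 4e-3) accommodates the
    # small numerical differences compiled kernels can show versus eager mode.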
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    '''simple docstring'''

    def setUp(self):
        super().setUp()
        self.path_model = 'facebook/opt-350m'

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            'Today is a beautiful day and I want to',
            'In the city of',
            'Paris is the capital of France and',
            'Computers and mobile phones have taken',
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors='tf', padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    '''simple docstring'''

    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = 'facebook/opt-125m'

        EXPECTED_OUTPUTS = [
            'Today is a beautiful day and I want to',
            'In the city of New York, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors='tf').input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
    def test_batch_generation(self):
        model_id = 'facebook/opt-350m'

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = 'left'

        # use different length sentences to test batching
        sentences = [
            'Hello, my dog is a little',
            'Today, I',
        ]

        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'])

        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs['attention_mask'][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
            'Today, I was in the middle of a conversation with a friend about the',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
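
    # Note (added): num_paddings shortens max_length for the padded prompt so
    # the batched and single-prompt runs generate the same number of new tokens,
    # which is what makes the three decoded outputs directly comparable.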
    def test_generation_post_attn_layer_norm(self):
        model_id = 'facebook/opt-350m'

        EXPECTED_OUTPUTS = [
            'Today is a beautiful day and I want to',
            'In the city of San Francisco, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors='tf').input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS) | 12 | 0 |
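
# Note (added): the integration and generation tests above are gated behind the
# @slow decorator; in the transformers test suite such tests are typically
# enabled via the RUN_SLOW environment variable, e.g. (illustrative invocation,
# test-file path assumed):
#   RUN_SLOW=1 pytest tests/models/opt/test_modeling_tf_opt.py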