"""
Odd-even transposition sort implemented with one process per list element;
neighboring processes exchange values over multiprocessing pipes.
"""
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """
    The function run by each worker process.

    position = the index in the list this process represents
    value = the initial value at list[position]
    l_send, r_send = the pipes used to send to the left and right neighbors
    lr_cv, rr_cv = the pipes used to receive from the left and right neighbors
    result_pipe = the pipe used to send the final value back to main
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
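# A quick correctness sketch: for lists of up to 10 elements (the hard-coded
# swap count above), the parallel sort agrees with Python's built-in sort.
#
#     >>> odd_even_transposition([4, 2, 7, 1]) == sorted([4, 2, 7, 1])
#     True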
"""Scrape the live COVID-19 statistics from worldometers.info."""
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dictionary of current worldwide COVID-19 statistics."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
"""Fine-tune transformers on GLUE tasks with PyTorch Lightning (via the lightning_base helpers)."""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
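# Example invocation sketch (hypothetical paths; generic flags such as
# --model_name_or_path and --do_train come from lightning_base's
# add_generic_args, which is not shown here):
#
#   python run_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results --do_train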
"""Check (and optionally fix) duplicate or unsorted model entries in the docs table of contents."""
import argparse
from collections import defaultdict

import yaml

PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Remove duplicate entries and sort the model doc entries alphabetically by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
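# Toy example of what clean_model_doc_toc does: duplicated "local" entries with
# identical titles are merged, and the result is sorted by title, case-insensitively.
#
#     >>> clean_model_doc_toc([
#     ...     {"local": "model_doc/bert", "title": "BERT"},
#     ...     {"local": "model_doc/albert", "title": "ALBERT"},
#     ...     {"local": "model_doc/bert", "title": "BERT"},
#     ... ])
#     [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]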
"""Token-classification task definitions (NER, chunking, POS) built on utils_ner."""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 format, the chunk column is the second-to-last one
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP",
                "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT",
                "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP",
            ]


class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM",
                "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X",
            ]
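# Input format sketch: NER.read_examples_from_file expects CoNLL-style files,
# one token per line with space-separated columns and blank lines between
# sentences, e.g. (hypothetical):
#
#   U.N. NNP I-NP I-ORG
#   official NN I-NP O
#
# POS.read_examples_from_file instead consumes CoNLL-U files via conllu.parse_incr.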
"""
A pure-Python implementation of the MD5 hash (RFC 1321), written for study.
For real use, prefer hashlib.
"""
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string to little endian (byte-wise reversal)."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as the little-endian hex of its low 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message into a bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list, None, None]:
    """Split the bit string into 512-bit blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT of the low 32 bits of a non-negative integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the low 32 bits of i left by shift places."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """
    Return the 32-character hex MD5 digest of the message, as bytes.

    >>> md5_me(b"")
    b'd41d8cd98f00b204e9800998ecf8427e'
    """
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    # Per-round left-rotation amounts: four values repeated four times per round
    shift_amounts = (
        [7, 12, 17, 22] * 4
        + [5, 9, 14, 20] * 4
        + [4, 11, 16, 23] * 4
        + [6, 10, 15, 21] * 4
    )

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16

            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
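# Cross-check sketch: the pure-Python digest agrees with the C implementation
# in the standard library, e.g.
#
#     >>> import hashlib
#     >>> md5_me(b"hello world") == hashlib.md5(b"hello world").hexdigest().encode("utf-8")
#     True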
"""COMET machine-translation metric, wrapped for use with the `datasets` library."""
import comet  # From: unbabel-comet
import torch

import datasets

logger = datasets.logging.get_logger(__name__)
__lowerCAmelCase = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
"""Heun's second-order discrete scheduler (k-diffusion style) for diffusers."""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Scheduler with Heun steps for discrete beta schedules."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,  # sensible defaults
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        """Scale the denoising model input to match the Euler algorithm."""
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""

        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
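# Minimal usage sketch (not runnable standalone because of the relative imports
# above). Inside diffusers the scheduler is typically driven like this, with
# `noise_pred` coming from a UNet:
#
#     scheduler = HeunDiscreteScheduler(beta_schedule="scaled_linear")
#     scheduler.set_timesteps(num_inference_steps=25, device="cuda")
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(latents, t)
#         noise_pred = unet(model_input, t).sample
#         latents = scheduler.step(noise_pred, t, latents).prev_sample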
"""Simulated annealing over 2D search problems."""
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Run simulated annealing from search_prob and return the best state found."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum

            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
"""Build a bare-minimum quantum circuit with a single qubit, measure it 1000
times on a simulator, and report the counts of the observed states."""
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int):
    # Use Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])

    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
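# Note: with a single qubit left in its default |0> state, every one of the
# 1000 shots lands on '0', so a typical run prints:
#   Total count for various states are: {'0': 1000}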
"""Convert MobileViT checkpoints from the original ml-cvnets repository to 🤗 Transformers."""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
    if "expand_1x1" in name:
        name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
    if "conv_3x3" in name:
        name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
    if "reduce_1x1" in name:
        name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original checkpoint's weights into our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
args = parser.parse_args()
convert_mobilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
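# Example invocation sketch (script name and paths are illustrative):
#   python convert_mobilevit_checkpoint.py --mobilevit_name mobilevit_s \
#       --checkpoint_path ./weights/mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small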
"""Slow/integration test for the Flax Stable Diffusion inpainting pipeline."""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up memory after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
"""Tests for the CTRL tokenizer."""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
"""
Regular-expression matching with support for '.' (any single character) and
'*' (zero or more of the preceding element), via dynamic programming.
"""


def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Bottom-up dynamic programming solution for matching the input string
    against the given pattern.

    >>> match_pattern("aaa", "a.a")
    True
    >>> match_pattern("aaa", "aa")
    False
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
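# Two classic checks, hand-verified against the DP recurrence above:
#
#     >>> match_pattern("aab", "c*a*b")
#     True
#     >>> match_pattern("mississippi", "mis*is*p*.")
#     False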
"""Given two of voltage, current, and power, compute the missing quantity."""
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Compute whichever of the three quantities is passed as 0,
    using P = V * I.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
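# Usage sketch: pass exactly one quantity as 0 and the function solves for it.
#
#     >>> electric_power(voltage=0, current=2, power=5)
#     result(name='voltage', value=2.5)
#     >>> electric_power(voltage=2, current=2, power=0)
#     result(name='power', value=4.0)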
"""Tests for the Blenderbot Small tokenizer."""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = BlenderbotSmallTokenizer
__UpperCAmelCase : Tuple = False
def __lowercase ( self : List[Any] ):
'''simple docstring'''
super().setUp()
_a : List[str] = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
_a : Tuple = dict(zip(_a ,range(len(_a ) ) ) )
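        # subword-nmt style BPE merges for the test vocabulary; the '@@'
        # suffix marks a subword that is continued by the following token.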
_a : List[Any] = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
_a : List[Any] = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
_a : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_a : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(_a ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(_a ) )
def __lowercase ( self : List[Any] ,**_a : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : Tuple ,_a : int ):
'''simple docstring'''
_a : Optional[Any] = 'adapt act apte'
_a : Dict = 'adapt act apte'
return input_text, output_text
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
_a : Union[str, Any] = 'adapt act apte'
_a : Dict = ['adapt', 'act', 'ap@@', 'te']
_a : Tuple = tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
_a : List[str] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_a : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : str = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
_a : Union[str, Any] = 'I am a small frog.'
_a : int = tok([src_text] ,padding=_a ,truncation=_a )['input_ids']
_a : str = tok.batch_decode(_a ,skip_special_tokens=_a ,clean_up_tokenization_spaces=_a )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
_a : Union[str, Any] = 'I am a small frog .'
_a : Optional[Any] = '.'
_a : Optional[Any] = tok(_a )['input_ids']
_a : Union[str, Any] = tok(_a )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 5 | 1 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ (__a : str = "AAPL" ):
"""simple docstring"""
_a : Tuple = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
_a : List[Any] = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
_a : Any = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 5 |
'''simple docstring'''
# fmt: off
__lowerCAmelCase = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
__lowerCAmelCase = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = 'Morse code here!'
print(__a )
_a : Tuple = encrypt(__a )
print(__a )
_a : str = decrypt(__a )
print(__a )
if __name__ == "__main__":
main()
| 5 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
__lowerCAmelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
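# MAPPING translates fairseq parameter names to Hugging Face ones; the '*'
# placeholder is replaced by the layer index when the weights are loaded.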
def UpperCAmelCase_ (__a : Optional[int] , __a : Dict , __a : Optional[Any] , __a : str , __a : Tuple ):
"""simple docstring"""
for attribute in key.split('.' ):
_a : List[str] = getattr(__a , __a )
if weight_type is not None:
_a : Optional[Any] = getattr(__a , __a ).shape
else:
_a : Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_a : Tuple = value
elif weight_type == "weight_g":
_a : Optional[Any] = value
elif weight_type == "weight_v":
_a : Tuple = value
elif weight_type == "bias":
_a : str = value
elif weight_type == "running_mean":
_a : List[Any] = value
elif weight_type == "running_var":
_a : str = value
elif weight_type == "num_batches_tracked":
_a : List[str] = value
elif weight_type == "inv_freq":
_a : Optional[int] = value
else:
_a : Dict = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def UpperCAmelCase_ (__a : str , __a : List[str] , __a : Dict ):
"""simple docstring"""
_a : str = []
_a : Optional[int] = fairseq_model.state_dict()
_a : Optional[Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
_a : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == 'group' , )
_a : List[str] = True
else:
for key, mapped_key in MAPPING.items():
_a : int = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_a : Tuple = True
if "*" in mapped_key:
_a : List[str] = name.split(__a )[0].split('.' )[-2]
_a : int = mapped_key.replace('*' , __a )
if "pos_bias_u" in name:
_a : List[Any] = None
elif "pos_bias_v" in name:
_a : str = None
elif "weight_g" in name:
_a : Optional[Any] = 'weight_g'
elif "weight_v" in name:
_a : Optional[int] = 'weight_v'
elif "bias" in name:
_a : Tuple = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_a : List[str] = 'weight'
elif "running_mean" in name:
_a : Optional[int] = 'running_mean'
elif "inv_freq" in name:
_a : Dict = 'inv_freq'
elif "running_var" in name:
_a : Any = 'running_var'
elif "num_batches_tracked" in name:
_a : List[str] = 'num_batches_tracked'
else:
_a : int = None
set_recursively(__a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCAmelCase_ (__a : str , __a : Union[str, Any] , __a : Any , __a : Union[str, Any] , __a : Union[str, Any] ):
"""simple docstring"""
_a : Tuple = full_name.split('conv_layers.' )[-1]
_a : Optional[Any] = name.split('.' )
_a : Optional[int] = int(items[0] )
_a : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_a : Dict = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_a : Dict = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
_a : Union[str, Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
_a : Optional[int] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def UpperCAmelCase_ (__a : Tuple , __a : Optional[Any] , __a : List[Any]=None , __a : List[Any]=None , __a : Dict=True ):
"""simple docstring"""
if config_path is not None:
_a : int = WavaVecaConformerConfig.from_pretrained(__a , hidden_act='swish' )
else:
_a : Tuple = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_a : Union[str, Any] = 'rotary'
if is_finetuned:
if dict_path:
_a : Dict = Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_a : Optional[int] = target_dict.pad_index
_a : List[str] = target_dict.bos_index
_a : int = target_dict.eos_index
_a : Tuple = len(target_dict.symbols )
_a : List[str] = os.path.join(__a , 'vocab.json' )
if not os.path.isdir(__a ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
_a : Union[str, Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
_a : int = 0
_a : Optional[Any] = 1
with open(__a , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(__a , __a )
_a : Any = WavaVecaCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__a , )
_a : Union[str, Any] = True if config.feat_extract_norm == 'layer' else False
_a : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
_a : Optional[Any] = WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
_a : int = WavaVecaConformerForCTC(__a )
else:
_a : Dict = WavaVecaConformerForPreTraining(__a )
if is_finetuned:
_a, _a, _a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
_a : Optional[int] = argparse.Namespace(task='audio_pretraining' )
_a : Optional[int] = fairseq.tasks.setup_task(__a )
_a, _a, _a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__a )
_a : List[str] = model[0].eval()
recursively_load_weights(__a , __a , not is_finetuned )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCAmelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 5 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-config' )
except HTTPError:
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('test-config' ,use_auth_token=self._token )
_a : Optional[Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a ,repo_id='test-config' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Tuple = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token )
_a : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='valid_org/test-config-org' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
_a : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'} )
_a : int = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" ,trust_remote_code=_a )
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,'CustomConfig' )
self.assertEqual(new_config.attribute ,42 )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_a : int = c.n_embd + 1 # int
_a : str = c.resid_pdrop + 1.0 # float
_a : Dict = not c.scale_attn_weights # bool
_a : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(_a ,c.n_embd ,'mismatch for key: n_embd' )
self.assertEqual(_a ,c.resid_pdrop ,'mismatch for key: resid_pdrop' )
self.assertEqual(_a ,c.scale_attn_weights ,'mismatch for key: scale_attn_weights' )
self.assertEqual(_a ,c.summary_type ,'mismatch for key: summary_type' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : int = PretrainedConfig()
_a : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_a ,['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_a : Dict = [key for key, value in config_common_kwargs.items() if value == getattr(_a ,_a )]
if len(_a ) > 0:
raise ValueError(
'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs`; pick another value for them:'
F""" {', '.join(_a )}.""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaises(_a ):
# config is in subfolder, the following should not work without specifying the subfolder
_a : List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_a : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ,subfolder='bert' )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[Any] = mock.Mock()
_a : Any = 500
_a : Any = {}
_a : Any = HTTPError
_a : List[Any] = {}
# Download this model to make sure it's in the cache.
_a : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' ,return_value=_a ) as mock_head:
_a : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that the fake head request was actually made
mock_head.assert_called()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = AutoConfig.from_pretrained('bert-base-cased' )
_a : List[str] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_a )
_a : str = 2
json.dump(configuration.to_dict() ,open(os.path.join(_a ,'config.4.0.0.json' ) ,'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_a : Tuple = ['config.42.0.0.json']
_a : int = 768
configuration.save_pretrained(_a )
shutil.move(os.path.join(_a ,'config.4.0.0.json' ) ,os.path.join(_a ,'config.42.0.0.json' ) )
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,768 )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
_a : Optional[int] = 'v4.0.0'
_a, _a : Tuple = new_transformers.models.auto.AutoConfig.from_pretrained(
_a ,return_unused_kwargs=_a )
self.assertEqual(new_configuration.hidden_size ,2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_a ,{} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_a : str = 'v3.0.0'
_a : Optional[Any] = old_transformers.models.auto.AutoConfig.from_pretrained(_a )
self.assertEqual(old_configuration.hidden_size ,768 )
| 5 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = """▁"""
__lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
__lowerCAmelCase = {
"""vocab_file""": {
"""google/reformer-crime-and-punishment""": (
"""https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
)
}
}
__lowerCAmelCase = {
"""google/reformer-crime-and-punishment""": 5_2_4_2_8_8,
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : str = VOCAB_FILES_NAMES
__UpperCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : int = ['''input_ids''', '''attention_mask''']
def __init__( self : str ,_a : Any ,_a : str="</s>" ,_a : Union[str, Any]="<unk>" ,_a : str=[] ,_a : Optional[Dict[str, Any]] = None ,**_a : Dict ,):
'''simple docstring'''
_a : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_a ,unk_token=_a ,additional_special_tokens=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
_a : Dict = vocab_file
_a : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : str = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
'''simple docstring'''
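        # The SentencePiece processor is not picklable, so it is dropped here
        # and reloaded from the vocab file in __setstate__.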
_a : Optional[int] = self.__dict__.copy()
_a : Dict = None
return state
def __setstate__( self : List[str] ,_a : List[str] ):
'''simple docstring'''
_a : Any = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_a : List[Any] = {}
_a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowercase ( self : List[Any] ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def __lowercase ( self : List[str] ,_a : int ):
'''simple docstring'''
return self.sp_model.piece_to_id(_a )
def __lowercase ( self : Dict ,_a : Optional[int] ):
'''simple docstring'''
if index < self.sp_model.get_piece_size():
_a : int = self.sp_model.IdToPiece(_a )
return token
def __lowercase ( self : Tuple ,_a : Dict ):
'''simple docstring'''
_a : Tuple = []
_a : Dict = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_a ) + token
_a : str = []
else:
current_sub_tokens.append(_a )
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __lowercase ( self : Dict ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Dict = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,'wb' ) as fi:
_a : str = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 5 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__lowerCAmelCase = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
__lowerCAmelCase = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
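# Per-size layer counts and hidden sizes, used to build the RwkvConfig when the
# size is inferred from the checkpoint name.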
def UpperCAmelCase_ (__a : Dict ):
"""simple docstring"""
_a : List[Any] = list(state_dict.keys() )
for name in state_dict_keys:
_a : List[Any] = state_dict.pop(__a )
        # emb -> embeddings
if name.startswith('emb.' ):
_a : List[str] = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
_a : Dict = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
_a : int = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , __a )
# ffn -> feed_forward
_a : str = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , __a )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
_a : Any = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
_a : int = name.replace('.time_mix_v' , '.time_mix_value' )
    # time_mix_r -> time_mix_receptance and reshape
if name.endswith('.time_mix_r' ):
_a : Tuple = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
_a : Tuple = 'rwkv.' + name
_a : List[Any] = weight
return state_dict
def UpperCAmelCase_ (__a : Tuple , __a : Union[str, Any] , __a : List[str] , __a : str=None , __a : List[str]=None , __a : int=False , __a : int=None ):
"""simple docstring"""
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
_a : List[Any] = 5_0_2_7_7
_a : Optional[Any] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
_a : Optional[Any] = PreTrainedTokenizerFast(tokenizer_file=__a )
_a : List[Any] = len(__a )
tokenizer.save_pretrained(__a )
# 2. Build the config
_a : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_a : str = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
_a : str = RwkvConfig(
vocab_size=__a , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__a )
# 3. Download model file then convert state_dict
_a : Tuple = hf_hub_download(__a , __a )
_a : Optional[int] = torch.load(__a , map_location='cpu' )
_a : Dict = convert_state_dict(__a )
# 4. Split in shards and save
_a, _a : List[Any] = shard_checkpoint(__a )
for shard_file, shard in shards.items():
torch.save(__a , os.path.join(__a , __a ) )
if index is not None:
_a : Dict = os.path.join(__a , __a )
# Save the index as well
with open(__a , 'w' , encoding='utf-8' ) as f:
_a : List[Any] = json.dumps(__a , indent=2 , sort_keys=__a ) + '\n'
f.write(__a )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
        'Cleaning up shards. This may fail with an OOM error; if this is the case, don\'t worry, you still have converted the model.' )
_a : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_a : Optional[Any] = torch.load(os.path.join(__a , __a ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__a , __a ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
_a : List[str] = AutoModelForCausalLM.from_pretrained(__a )
model.push_to_hub(__a , max_shard_size='2GB' )
tokenizer.push_to_hub(__a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
__lowerCAmelCase = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 5 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
pass
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Dict ,_a : Any ):
'''simple docstring'''
_a : Any = data
_a : Node | None = None
def __iter__( self : int ):
'''simple docstring'''
_a : int = self
_a : Dict = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(_a )
yield node.data
_a : Any = node.next_node
@property
def __lowercase ( self : Tuple ):
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
__lowerCAmelCase = Node(1)
__lowerCAmelCase = Node(2)
__lowerCAmelCase = Node(3)
__lowerCAmelCase = Node(4)
print(root_node.has_loop) # False
__lowerCAmelCase = root_node.next_node
print(root_node.has_loop) # True
__lowerCAmelCase = Node(5)
__lowerCAmelCase = Node(6)
__lowerCAmelCase = Node(5)
__lowerCAmelCase = Node(6)
print(root_node.has_loop) # False
__lowerCAmelCase = Node(1)
print(root_node.has_loop) # False
| 5 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
__lowerCAmelCase = datasets.logging.get_logger(__name__)
__lowerCAmelCase = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'sources': datasets.Value('string' ,id='sequence' ),
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
            'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf',
] ,)
def __lowercase ( self : int ,_a : int ):
'''simple docstring'''
if self.config_name == "default":
_a : List[Any] = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
else:
_a : List[str] = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Dict ,_a : Optional[Any] ,_a : List[str]=None ,_a : Tuple=False ):
'''simple docstring'''
if gpus is None:
_a : str = 1 if torch.cuda.is_available() else 0
_a : Optional[Any] = {'src': sources, 'mt': predictions, 'ref': references}
_a : Optional[Any] = [dict(zip(_a ,_a ) ) for t in zip(*data.values() )]
_a, _a : Tuple = self.scorer.predict(_a ,gpus=_a ,progress_bar=_a )
return {"mean_score": mean_score, "scores": scores}
| 5 | 1 |
'''simple docstring'''
def UpperCAmelCase_ (__a : float , __a : float , __a : float , __a : float , __a : float , ):
"""simple docstring"""
_a : Dict = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be non-negative' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('Relative densities cannot be greater than one' )
else:
_a : str = 1 - (matter_density + radiation_density + dark_energy)
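        # Friedmann equation: E(z)^2 = Omega_r * (1+z)^4 + Omega_m * (1+z)^3
        #                              + Omega_k * (1+z)^2 + Omega_Lambda,
        # with H(z) = H0 * sqrt(E(z)^2).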
_a : List[str] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
_a : List[str] = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
__lowerCAmelCase = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 5 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class UpperCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = MBartConfig
__UpperCAmelCase : Union[str, Any] = {}
__UpperCAmelCase : List[Any] = '''gelu'''
def __init__( self : Dict ,_a : Union[str, Any] ,_a : List[str]=13 ,_a : List[str]=7 ,_a : List[str]=True ,_a : List[Any]=False ,_a : Any=99 ,_a : int=32 ,_a : Any=2 ,_a : List[Any]=4 ,_a : Optional[Any]=37 ,_a : Tuple=0.1 ,_a : str=0.1 ,_a : int=20 ,_a : Tuple=2 ,_a : Tuple=1 ,_a : Union[str, Any]=0 ,):
'''simple docstring'''
_a : Any = parent
_a : int = batch_size
_a : Optional[int] = seq_length
_a : int = is_training
_a : Dict = use_labels
_a : Optional[Any] = vocab_size
_a : Union[str, Any] = hidden_size
_a : Any = num_hidden_layers
_a : Dict = num_attention_heads
_a : List[Any] = intermediate_size
_a : Dict = hidden_dropout_prob
_a : Optional[Any] = attention_probs_dropout_prob
_a : Union[str, Any] = max_position_embeddings
_a : Optional[int] = eos_token_id
_a : List[str] = pad_token_id
_a : str = bos_token_id
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
_a : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
_a : str = tf.concat([input_ids, eos_tensor] ,axis=1 )
_a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_a : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
_a : int = prepare_mbart_inputs_dict(_a ,_a ,_a )
return config, inputs_dict
def __lowercase ( self : List[Any] ,_a : Optional[Any] ,_a : Optional[Any] ):
'''simple docstring'''
_a : Any = TFMBartModel(config=_a ).get_decoder()
_a : Tuple = inputs_dict['input_ids']
_a : Optional[int] = input_ids[:1, :]
_a : int = inputs_dict['attention_mask'][:1, :]
_a : List[str] = inputs_dict['head_mask']
_a : List[Any] = 1
# first forward pass
_a : List[Any] = model(_a ,attention_mask=_a ,head_mask=_a ,use_cache=_a )
_a, _a : List[str] = outputs.to_tuple()
_a : Union[str, Any] = past_key_values[1]
def UpperCAmelCase_ (__a : Tuple , __a : List[Any] , __a : List[str] , __a : int=None , __a : Optional[int]=None , __a : Dict=None , __a : List[str]=None , __a : Union[str, Any]=None , ):
"""simple docstring"""
if attention_mask is None:
_a : Union[str, Any] = tf.cast(tf.math.not_equal(__a , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_a : Tuple = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_a : str = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_a : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_a : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
__UpperCAmelCase : Tuple = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
__UpperCAmelCase : Any = (
{
'''conversational''': TFMBartForConditionalGeneration,
'''feature-extraction''': TFMBartModel,
'''summarization''': TFMBartForConditionalGeneration,
'''text2text-generation''': TFMBartForConditionalGeneration,
'''translation''': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Dict = False
def __lowercase ( self : int ,_a : List[str] ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : Union[str, Any] ):
'''simple docstring'''
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Tuple = TFMBartModelTester(self )
_a : List[Any] = ConfigTester(self ,config_class=_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = [
''' UN Chief Says There Is No Military Solution in Syria''',
]
__UpperCAmelCase : Union[str, Any] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
]
__UpperCAmelCase : Dict = '''facebook/mbart-large-en-ro'''
@cached_property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __lowercase ( self : List[Any] ,**_a : Tuple ):
'''simple docstring'''
_a : int = self.translate_src_text(**_a )
self.assertListEqual(self.expected_text ,_a )
def __lowercase ( self : Optional[int] ,**_a : Any ):
'''simple docstring'''
_a : Dict = self.tokenizer(self.src_text ,**_a ,return_tensors='tf' )
_a : Dict = self.model.generate(
model_inputs.input_ids ,attention_mask=model_inputs.attention_mask ,num_beams=2 )
_a : Optional[int] = self.tokenizer.batch_decode(_a ,skip_special_tokens=_a )
return generated_words
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
self._assert_generated_batch_equal_expected()
| 5 |
'''simple docstring'''
import sys
def UpperCAmelCase_ (__a : List[str] ):
"""simple docstring"""
_a : List[str] = len(__a )
_a : Dict = [[0 for x in range(__a )] for x in range(__a )]
_a : Union[str, Any] = [[0 for x in range(__a )] for x in range(__a )]
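    # matrix[a][b] holds the minimum number of scalar multiplications for the
    # chain a..b; sol[a][b] records the split point used to reconstruct the
    # optimal parenthesization.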
for chain_length in range(2 , __a ):
for a in range(1 , n - chain_length + 1 ):
_a : Tuple = a + chain_length - 1
_a : Any = sys.maxsize
for c in range(__a , __a ):
_a : Optional[Any] = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
_a : Dict = cost
_a : Any = c
return matrix, sol
def UpperCAmelCase_ (__a : Tuple , __a : List[str] , __a : Dict ):
"""simple docstring"""
if i == j:
print('A' + str(__a ) , end=' ' )
else:
print('(' , end=' ' )
print_optiomal_solution(__a , __a , optimal_solution[i][j] )
print_optiomal_solution(__a , optimal_solution[i][j] + 1 , __a )
print(')' , end=' ' )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
_a : Any = len(__a )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
_a, _a : Union[str, Any] = matrix_chain_order(__a )
    print('No. of Operations required: ' + str(matrix[1][n - 1] ) )
print_optiomal_solution(__a , 1 , n - 1 )
if __name__ == "__main__":
main()
| 5 | 1 |
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] ,*_a : Optional[Any] ,_a : List[str]=None ,_a : Tuple=None ,**_a : int ):
'''simple docstring'''
super().__init__(*_a ,**_a )
_a : int = eval_examples
_a : Optional[Any] = post_process_function
def __lowercase ( self : Tuple ,_a : Optional[int]=None ,_a : Optional[Any]=None ,_a : int=None ,_a : str = "eval" ):
'''simple docstring'''
_a : Dict = self.eval_dataset if eval_dataset is None else eval_dataset
_a : Union[str, Any] = self.get_eval_dataloader(_a )
_a : Tuple = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_a : int = self.compute_metrics
_a : List[str] = None
_a : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_a : List[Any] = time.time()
try:
_a : Optional[Any] = eval_loop(
_a ,description='Evaluation' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=_a ,metric_key_prefix=_a ,)
finally:
_a : List[Any] = compute_metrics
_a : Optional[Any] = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_a ,_a ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size ) ,) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
_a : List[Any] = self.post_process_function(_a ,_a ,output.predictions )
_a : str = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
_a : Optional[int] = metrics.pop(_a )
metrics.update(output.metrics )
else:
_a : Dict = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_a )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_a : Optional[int] = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,_a )
return metrics
def __lowercase ( self : Union[str, Any] ,_a : Optional[Any] ,_a : List[Any] ,_a : int=None ,_a : str = "test" ):
'''simple docstring'''
_a : Optional[int] = self.get_test_dataloader(_a )
# Temporarily disable metric computation, we will do it in the loop here.
_a : int = self.compute_metrics
_a : Tuple = None
_a : Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_a : List[Any] = time.time()
try:
_a : str = eval_loop(
_a ,description='Prediction' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=_a ,metric_key_prefix=_a ,)
finally:
_a : Optional[int] = compute_metrics
_a : Any = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_a ,_a ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size ) ,) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_a : Dict = self.post_process_function(_a ,_a ,output.predictions ,'predict' )
_a : Optional[Any] = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
_a : Tuple = metrics.pop(_a )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=_a )
| 5 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase_ (__a : Optional[Any] ):
"""simple docstring"""
_a : int = FileLock(str(tmpdir / 'foo.lock' ) )
_a : List[Any] = FileLock(str(tmpdir / 'foo.lock' ) )
_a : Any = 0.01
with locka.acquire():
with pytest.raises(__a ):
_a : int = time.time()
locka.acquire(__a )
assert time.time() - _start > timeout
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Dict = 'a' * 1_0_0_0 + '.lock'
_a : int = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('.lock' )
assert not locka._lock_file.endswith(__a )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
_a : Dict = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__a ):
locka.acquire(0 )
| 5 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__lowerCAmelCase = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 |
'''simple docstring'''
def UpperCAmelCase_ (__a : int = 1_0**1_2 ):
"""simple docstring"""
_a : List[str] = 1
_a : Optional[int] = 0
_a : Any = 1
_a : List[str] = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 | 1 |
'''simple docstring'''
def UpperCAmelCase_ (__a : int = 1_0_0 ):
"""simple docstring"""
_a : Any = (n * (n + 1) // 2) ** 2
_a : Tuple = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
__lowerCAmelCase = {"""mobilebert-uncased""": 5_1_2}
__lowerCAmelCase = {}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[Any] = MobileBertTokenizer
def __init__( self : Dict ,_a : List[Any]=None ,_a : Optional[Any]=None ,_a : Union[str, Any]=True ,_a : Dict="[UNK]" ,_a : Union[str, Any]="[SEP]" ,_a : Any="[PAD]" ,_a : Optional[int]="[CLS]" ,_a : Optional[Any]="[MASK]" ,_a : Dict=True ,_a : Any=None ,**_a : Optional[Any] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
_a : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_a ) != do_lower_case
or normalizer_state.get('strip_accents' ,_a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_a ) != tokenize_chinese_chars
):
_a : Optional[Any] = getattr(_a ,normalizer_state.pop('type' ) )
_a : Dict = do_lower_case
_a : str = strip_accents
_a : Tuple = tokenize_chinese_chars
_a : Optional[Any] = normalizer_class(**_a )
_a : str = do_lower_case
def __lowercase ( self : Tuple ,_a : Union[str, Any] ,_a : List[str]=None ):
'''simple docstring'''
_a : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : List[str] = [self.sep_token_id]
_a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
_a : int = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
| 5 | 1 |
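# The create_token_type_ids_from_sequences logic above reduces to zeros for
# [CLS] + segment A + [SEP] and ones for segment B + [SEP]. A standalone
# sketch with hypothetical wordpiece ids (not taken from any real vocab):
seq_a = [2023, 2003]
seq_b = [2178, 6251]
token_type_ids = [0] * (1 + len(seq_a) + 1) + [1] * (len(seq_b) + 1)
print(token_type_ids)  # [0, 0, 0, 0, 1, 1, 1]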
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
# TODO Update this
__lowerCAmelCase = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : int = '''esm'''
def __init__( self : Optional[Any] ,_a : Tuple=None ,_a : Optional[Any]=None ,_a : Optional[int]=None ,_a : Tuple=768 ,_a : Tuple=12 ,_a : Optional[int]=12 ,_a : str=3072 ,_a : int=0.1 ,_a : Dict=0.1 ,_a : List[str]=1026 ,_a : List[Any]=0.02 ,_a : Dict=1E-12 ,_a : Union[str, Any]="absolute" ,_a : Dict=True ,_a : Tuple=None ,_a : List[Any]=False ,_a : List[Any]=False ,_a : List[str]=None ,_a : Optional[Any]=None ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,mask_token_id=_a ,**_a )
_a : List[Any] = vocab_size
_a : List[Any] = hidden_size
_a : Union[str, Any] = num_hidden_layers
_a : Optional[Any] = num_attention_heads
_a : Union[str, Any] = intermediate_size
_a : Optional[int] = hidden_dropout_prob
_a : int = attention_probs_dropout_prob
_a : Dict = max_position_embeddings
_a : Tuple = initializer_range
_a : int = layer_norm_eps
_a : Optional[Any] = position_embedding_type
_a : List[str] = use_cache
_a : Optional[int] = emb_layer_norm_before
_a : Any = token_dropout
_a : Tuple = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.' )
_a : Any = EsmFoldConfig()
elif isinstance(_a ,_a ):
_a : Optional[Any] = EsmFoldConfig(**_a )
_a : str = esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
_a : List[str] = get_default_vocab_list()
else:
_a : Tuple = vocab_list
else:
_a : Optional[int] = None
_a : Optional[Any] = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,'use_esm_attn_map' ,_a ):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )
def __lowercase ( self : int ):
'''simple docstring'''
_a : str = super().to_dict()
if isinstance(self.esmfold_config ,_a ):
_a : Optional[int] = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : str = None
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = False
__UpperCAmelCase : bool = False
__UpperCAmelCase : bool = False
__UpperCAmelCase : float = 0
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = False
__UpperCAmelCase : int = 128
__UpperCAmelCase : "TrunkConfig" = None
def __lowercase ( self : Dict ):
'''simple docstring'''
if self.trunk is None:
_a : Union[str, Any] = TrunkConfig()
elif isinstance(self.trunk ,_a ):
_a : Any = TrunkConfig(**self.trunk )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Dict = asdict(self )
_a : List[str] = self.trunk.to_dict()
return output
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : int = 48
__UpperCAmelCase : int = 1024
__UpperCAmelCase : int = 128
__UpperCAmelCase : int = 32
__UpperCAmelCase : int = 32
__UpperCAmelCase : int = 32
__UpperCAmelCase : float = 0
__UpperCAmelCase : float = 0
__UpperCAmelCase : bool = False
__UpperCAmelCase : int = 4
__UpperCAmelCase : Optional[int] = 128
__UpperCAmelCase : "StructureModuleConfig" = None
def __lowercase ( self : Any ):
'''simple docstring'''
if self.structure_module is None:
_a : Any = StructureModuleConfig()
elif isinstance(self.structure_module ,_a ):
_a : int = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
_a : List[str] = self.sequence_state_dim // self.sequence_head_width
_a : Optional[Any] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[Any] = asdict(self )
_a : Dict = self.structure_module.to_dict()
return output
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : int = 384
__UpperCAmelCase : int = 128
__UpperCAmelCase : int = 16
__UpperCAmelCase : int = 128
__UpperCAmelCase : int = 12
__UpperCAmelCase : int = 4
__UpperCAmelCase : int = 8
__UpperCAmelCase : float = 0.1
__UpperCAmelCase : int = 8
__UpperCAmelCase : int = 1
__UpperCAmelCase : int = 2
__UpperCAmelCase : int = 7
__UpperCAmelCase : int = 10
__UpperCAmelCase : float = 1e-8
__UpperCAmelCase : float = 1e5
def __lowercase ( self : Tuple ):
'''simple docstring'''
return asdict(self )
def UpperCAmelCase_ ():
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 5 |
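# The trunk validation above enforces state_dim == num_heads * head_width.
# With the TrunkConfig defaults shown (sequence_state_dim=1024,
# sequence_head_width=32) the arithmetic is:
sequence_state_dim, sequence_head_width = 1024, 32
num_heads = sequence_state_dim // sequence_head_width
assert sequence_state_dim == num_heads * sequence_head_width  # 1024 == 32 * 32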
'''simple docstring'''
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : List[Any] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
_a : Optional[int] = ''
_a : List[str] = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__a ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
_a, _a : Optional[int] = 0, 0
# length[i] shows the length of palindromic substring with center i
_a : Optional[Any] = [1 for i in range(len(__a ) )]
# for each character in new_string find corresponding palindromic string
_a : Dict = 0
for j in range(len(__a ) ):
_a : Dict = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__a )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_a : Optional[int] = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
_a : str = j - k + 1 # noqa: E741
_a : Any = j + k - 1
# update max_length and start position
if max_length < length[j]:
_a : Union[str, Any] = length[j]
_a : List[str] = j
# create that string
_a : Tuple = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 1 |
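# The routine above is Manacher's O(n) longest-palindromic-substring algorithm
# (the corpus renaming collapses some of its locals, so it will not run as-is).
# A compact self-contained O(n^2) expand-around-centre baseline that produces
# the same answer:
def longest_palindrome(s: str) -> str:
    best = ''
    for centre in range(2 * len(s) - 1):  # n odd centres + n - 1 even centres
        lo, hi = centre // 2, (centre + 1) // 2
        while lo >= 0 and hi < len(s) and s[lo] == s[hi]:
            lo, hi = lo - 1, hi + 1
        if hi - lo - 1 > len(best):
            best = s[lo + 1 : hi]
    return best

print(longest_palindrome('forgeeksskeegfor'))  # geeksskeeg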
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Dict = '''wavlm'''
def __init__( self : List[Any] ,_a : Any=32 ,_a : str=768 ,_a : List[str]=12 ,_a : Optional[Any]=12 ,_a : Union[str, Any]=3072 ,_a : Dict="gelu" ,_a : Union[str, Any]=0.1 ,_a : str=0.1 ,_a : List[Any]=0.1 ,_a : Optional[Any]=0.0 ,_a : Dict=0.1 ,_a : Optional[int]=0.1 ,_a : List[str]=0.02 ,_a : Any=1E-5 ,_a : str="group" ,_a : int="gelu" ,_a : str=(512, 512, 512, 512, 512, 512, 512) ,_a : Any=(5, 2, 2, 2, 2, 2, 2) ,_a : List[str]=(10, 3, 3, 3, 3, 2, 2) ,_a : List[str]=False ,_a : int=128 ,_a : Optional[Any]=16 ,_a : Tuple=320 ,_a : Optional[int]=800 ,_a : Tuple=False ,_a : Any=True ,_a : str=0.05 ,_a : Optional[Any]=10 ,_a : Any=2 ,_a : int=0.0 ,_a : int=10 ,_a : Optional[int]=320 ,_a : List[Any]=2 ,_a : Any=0.1 ,_a : Tuple=100 ,_a : Tuple=256 ,_a : int=256 ,_a : Tuple=0.1 ,_a : Optional[int]="mean" ,_a : List[str]=False ,_a : Union[str, Any]=False ,_a : Optional[int]=256 ,_a : Any=(512, 512, 512, 512, 1500) ,_a : Union[str, Any]=(5, 3, 3, 1, 1) ,_a : Union[str, Any]=(1, 2, 3, 1, 1) ,_a : Tuple=512 ,_a : Union[str, Any]=80 ,_a : int=0 ,_a : Dict=1 ,_a : Tuple=2 ,_a : str=False ,_a : Any=3 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : List[str]=None ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(**_a ,pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a )
_a : Any = hidden_size
_a : Union[str, Any] = feat_extract_norm
_a : List[str] = feat_extract_activation
_a : Optional[int] = list(_a )
_a : str = list(_a )
_a : Dict = list(_a )
_a : int = conv_bias
_a : Dict = num_buckets
_a : Optional[Any] = max_bucket_distance
_a : List[str] = num_conv_pos_embeddings
_a : int = num_conv_pos_embedding_groups
_a : Union[str, Any] = len(self.conv_dim )
_a : Optional[int] = num_hidden_layers
_a : Any = intermediate_size
_a : Any = hidden_act
_a : Optional[int] = num_attention_heads
_a : str = hidden_dropout
_a : Tuple = attention_dropout
_a : Union[str, Any] = activation_dropout
_a : str = feat_proj_dropout
_a : str = final_dropout
_a : List[str] = layerdrop
_a : List[str] = layer_norm_eps
_a : str = initializer_range
_a : str = num_ctc_classes
_a : Union[str, Any] = vocab_size
_a : Dict = do_stable_layer_norm
_a : Optional[int] = use_weighted_layer_sum
_a : List[str] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a : List[str] = apply_spec_augment
_a : Tuple = mask_time_prob
_a : Optional[Any] = mask_time_length
_a : List[str] = mask_time_min_masks
_a : Tuple = mask_feature_prob
_a : List[Any] = mask_feature_length
# parameters for pretraining with codevector quantized representations
_a : List[Any] = num_codevectors_per_group
_a : Any = num_codevector_groups
_a : Any = contrastive_logits_temperature
_a : Union[str, Any] = num_negatives
_a : Tuple = codevector_dim
_a : Tuple = proj_codevector_dim
_a : List[Any] = diversity_loss_weight
# ctc loss
_a : int = ctc_loss_reduction
_a : List[Any] = ctc_zero_infinity
# adapter
_a : Optional[Any] = add_adapter
_a : int = adapter_kernel_size
_a : List[Any] = adapter_stride
_a : Tuple = num_adapter_layers
_a : Dict = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_a : Tuple = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_a : Optional[int] = list(_a )
_a : Dict = list(_a )
_a : str = list(_a )
_a : List[str] = xvector_output_dim
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return functools.reduce(operator.mul ,self.conv_stride ,1 )
| 5 |
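# The property at the end of the config reduces the conv strides to the total
# input-to-frame downsampling factor; with the WavLM defaults shown above:
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 samples per frame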
'''simple docstring'''
from functools import lru_cache
@lru_cache
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 1 |
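# De-obfuscated restatement of the memoised factorial above, showing the
# lru_cache at work:
from functools import lru_cache

@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError('Number should not be negative.')
    return 1 if num in (0, 1) else num * factorial(num - 1)

print(factorial(10))           # 3628800
print(factorial.cache_info())  # all intermediate values are now cached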
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__lowerCAmelCase = threading.Lock()
__lowerCAmelCase = None
__lowerCAmelCase = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
__lowerCAmelCase = logging.WARNING
__lowerCAmelCase = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Dict = os.getenv('TRANSFORMERS_VERBOSITY' , __a )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def UpperCAmelCase_ ():
"""simple docstring"""
return __name__.split('.' )[0]
def UpperCAmelCase_ ():
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_a : str = logging.StreamHandler() # Set sys.stderr as stream.
_a : Optional[Any] = sys.stderr.flush
# Apply our default configuration to the library root logger.
_a : List[Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_a : List[str] = False
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
_a : int = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_a : str = None
def UpperCAmelCase_ ():
"""simple docstring"""
return log_levels
def UpperCAmelCase_ (__a : Optional[str] = None ):
"""simple docstring"""
if name is None:
_a : List[Any] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__a )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Union[str, Any] = False
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Dict = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = _get_library_root_logger().handlers
for handler in handlers:
_a : Union[str, Any] = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__a )
def UpperCAmelCase_ (self : Union[str, Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ):
"""simple docstring"""
_a : Union[str, Any] = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , __a )
if no_advisory_warnings:
return
self.warning(*__a , **__a )
__lowerCAmelCase = warning_advice
@functools.lru_cache(__a )
def UpperCAmelCase_ (self : int , *__a : Optional[Any] , **__a : Any ):
"""simple docstring"""
self.warning(*__a , **__a )
__lowerCAmelCase = warning_once
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Any ,*_a : Tuple ,**_a : int ): # pylint: disable=unused-argument
'''simple docstring'''
_a : int = args[0] if args else None
def __iter__( self : str ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[Any] ,_a : int ):
'''simple docstring'''
def empty_fn(*_a : Optional[Any] ,**_a : Any ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : List[str] ):
'''simple docstring'''
return self
def __exit__( self : List[str] ,_a : str ,_a : List[Any] ,_a : str ):
'''simple docstring'''
return
class UpperCAmelCase__ :
"""simple docstring"""
def __call__( self : Union[str, Any] ,*_a : Tuple ,**_a : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*_a ,**_a )
else:
return EmptyTqdm(*_a ,**_a )
def __lowercase ( self : str ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
_a : Any = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_a ,**_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__lowerCAmelCase = _tqdm_cls()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : str = True
hf_hub_utils.enable_progress_bars()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : Dict = False
hf_hub_utils.disable_progress_bars()
| 5 | 1 |
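# Consumer-side view of the verbosity machinery above, assuming transformers
# is installed (the names mirror transformers.utils.logging):
from transformers.utils import logging

logging.set_verbosity_info()  # or: export TRANSFORMERS_VERBOSITY=info
logger = logging.get_logger('transformers.modeling_utils')
logger.info('now emitted to stderr')
logging.disable_progress_bar()  # silences the tqdm wrapper defined above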
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 |
'''simple docstring'''
def UpperCAmelCase_ (__a : list[int] , __a : list[int] ):
"""simple docstring"""
if not len(__a ) == len(__a ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_a, _a, _a : Tuple = equationa
_a, _a, _a : str = equationa
# Calculate the determinants of the matrices
_a : Union[str, Any] = aa * ba - aa * ba
_a : List[Any] = ca * ba - ca * ba
_a : List[Any] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Consistent system)
return (0.0, 0.0)
else:
_a : int = determinant_x / determinant
_a : List[str] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 5 | 1 |
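# De-obfuscated restatement of the Cramer's rule routine above (the corpus
# renaming collapses the distinct coefficients), with a worked example:
def solve_cramer(eq1: list, eq2: list) -> tuple:
    a1, b1, c1 = eq1  # each equation is [a, b, c] for a*x + b*y = c
    a2, b2, c2 = eq2
    det = a1 * b2 - a2 * b1
    det_x = c1 * b2 - c2 * b1
    det_y = a1 * c2 - a2 * c1
    if det == 0:
        raise ValueError('No unique solution.')
    return det_x / det, det_y / det

print(solve_cramer([1, 2, 3], [4, 5, 6]))  # (-1.0, 2.0)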
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[int] ,_a : List[str] ,_a : Optional[Any]=99 ,_a : Tuple=13 ,_a : Any=7 ,_a : List[Any]=9 ,_a : str=True ,_a : Optional[Any]=True ,_a : List[str]=False ,_a : Optional[Any]=32 ,_a : Dict=5 ,_a : Optional[int]=4 ,_a : Optional[Any]=37 ,_a : List[Any]=8 ,_a : Optional[int]=0.1 ,_a : List[str]=0.002 ,_a : int=1 ,_a : Dict=0 ,_a : List[str]=0 ,_a : Dict=None ,_a : Dict=None ,):
'''simple docstring'''
_a : str = parent
_a : Dict = batch_size
_a : Optional[int] = encoder_seq_length
_a : List[str] = decoder_seq_length
# For common tests
_a : Tuple = self.decoder_seq_length
_a : Dict = is_training
_a : Optional[Any] = use_attention_mask
_a : Dict = use_labels
_a : Optional[Any] = vocab_size
_a : int = hidden_size
_a : List[str] = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Optional[int] = d_ff
_a : List[str] = relative_attention_num_buckets
_a : List[str] = dropout_rate
_a : Dict = initializer_factor
_a : int = eos_token_id
_a : Union[str, Any] = pad_token_id
_a : Tuple = decoder_start_token_id
_a : int = None
_a : Union[str, Any] = decoder_layers
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return TaConfig.from_pretrained('google/umt5-base' )
def __lowercase ( self : str ,_a : Union[str, Any] ,_a : int ,_a : Any ,_a : str=None ,_a : List[str]=None ,_a : str=None ,_a : Tuple=None ,_a : Tuple=None ,):
'''simple docstring'''
if attention_mask is None:
_a : Tuple = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_a : Any = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_a : Tuple = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=_a )
if decoder_head_mask is None:
_a : Tuple = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=_a )
if cross_attn_head_mask is None:
_a : Tuple = torch.ones(
config.num_decoder_layers ,config.num_attention_heads ,device=_a )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Any = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
_a : Any = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
_a : Any = input_ids.clamp(self.pad_token_id + 1 )
_a : Any = decoder_input_ids.clamp(self.pad_token_id + 1 )
_a : int = self.get_config()
_a : List[str] = config.num_attention_heads
_a : List[Any] = self.prepare_inputs_dict(_a ,_a ,_a )
return config, input_dict
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a, _a : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowercase ( self : Any ):
'''simple docstring'''
return TaConfig(
vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __lowercase ( self : Dict ):
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __lowercase ( self : List[Any] ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Optional[int] ,_a : Union[str, Any] ,_a : int ,):
'''simple docstring'''
_a : Union[str, Any] = UMTaModel(config=_a )
model.to(_a )
model.eval()
_a : Tuple = model(
input_ids=_a ,decoder_input_ids=_a ,attention_mask=_a ,decoder_attention_mask=_a ,)
_a : List[str] = model(input_ids=_a ,decoder_input_ids=_a )
_a : int = result.last_hidden_state
_a : Dict = result.past_key_values
_a : List[Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_a ) ,config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) ,4 )
def __lowercase ( self : str ,_a : int ,_a : Dict ,_a : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ,):
'''simple docstring'''
_a : Any = UMTaModel(config=_a ).get_decoder().to(_a ).eval()
# first forward pass
_a : Any = model(_a ,use_cache=_a )
_a : Optional[Any] = model(_a )
_a : Dict = model(_a ,use_cache=_a )
self.parent.assertTrue(len(_a ) == len(_a ) )
self.parent.assertTrue(len(_a ) == len(_a ) + 1 )
_a, _a : List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_a : List[str] = ids_tensor((self.batch_size, 1) ,config.vocab_size )
# append to next input_ids and
_a : List[str] = torch.cat([input_ids, next_tokens] ,dim=-1 )
_a : Tuple = model(_a )['last_hidden_state']
_a : Tuple = model(_a ,past_key_values=_a )['last_hidden_state']
# select random slice
_a : Tuple = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_a : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
_a : Dict = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1E-3 ) )
def __lowercase ( self : Optional[Any] ,_a : int ,_a : str ,):
'''simple docstring'''
_a : Optional[Any] = UMTaModel(config=_a ).to(_a ).half().eval()
_a : List[str] = model(**_a )['last_hidden_state']
self.parent.assertFalse(torch.isnan(_a ).any().item() )
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
__UpperCAmelCase : List[Any] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = True
__UpperCAmelCase : int = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : int = True
# The small UMT5 model needs higher percentages for CPU/MP tests
__UpperCAmelCase : List[Any] = [0.8, 0.9]
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
_a : str = UMTaModel(config_and_inputs[0] ).to(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_a ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=_a ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,)
@unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[str] = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
_a : Optional[int] = config_and_inputs[0]
_a : Optional[int] = UMTaForConditionalGeneration(_a ).eval()
model.to(_a )
_a : Tuple = {
'head_mask': torch.zeros(config.num_layers ,config.num_heads ,device=_a ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=_a ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=_a ),
}
for attn_name, (name, mask) in zip(_a ,head_masking.items() ):
_a : Optional[Any] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_a : Union[str, Any] = torch.ones(
config.num_decoder_layers ,config.num_heads ,device=_a )
_a : Dict = model.generate(
config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=_a ,return_dict_in_generate=_a ,**_a ,)
# We check the state of decoder_attentions and cross_attentions just from the last step
_a : Any = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Any = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=_a ).to(_a )
_a : Optional[Any] = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=_a ,legacy=_a )
_a : Any = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
_a : List[str] = tokenizer(_a ,return_tensors='pt' ,padding=_a ).input_ids
# fmt: off
_a : Dict = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333, 6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(_a ,_a )
_a : Optional[int] = model.generate(input_ids.to(_a ) )
_a : str = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
_a : Dict = tokenizer.batch_decode(_a )
self.assertEqual(_a ,_a )
| 5 |
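# A minimal stand-in for the ids_tensor helper the tester above imports (an
# assumption -- the real helper draws each id individually, but the result is
# equivalent random long ids in [0, vocab_size)):
import torch

def ids_tensor(shape, vocab_size):
    return torch.randint(0, vocab_size, shape, dtype=torch.long)

input_ids = ids_tensor((13, 7), vocab_size=99)  # batch_size x encoder_seq_length
print(input_ids.shape)  # torch.Size([13, 7])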
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ,_a : List[str] ,_a : Optional[Any]=13 ,_a : str=30 ,_a : str=2 ,_a : Union[str, Any]=3 ,_a : Optional[Any]=True ,_a : int=True ,_a : Union[str, Any]=32 ,_a : List[Any]=5 ,_a : Union[str, Any]=4 ,_a : int=37 ,_a : Any="gelu" ,_a : Union[str, Any]=0.1 ,_a : str=0.1 ,_a : List[str]=10 ,_a : Dict=0.02 ,_a : Tuple=None ,):
'''simple docstring'''
_a : Any = parent
_a : int = batch_size
_a : List[Any] = image_size
_a : Optional[int] = patch_size
_a : List[str] = num_channels
_a : Dict = is_training
_a : Dict = use_labels
_a : Optional[Any] = hidden_size
_a : str = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Dict = intermediate_size
_a : Union[str, Any] = hidden_act
_a : List[str] = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : List[str] = type_sequence_label_size
_a : int = initializer_range
_a : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a : Union[str, Any] = (image_size // patch_size) ** 2
_a : Tuple = num_patches + 1
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : str = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def __lowercase ( self : Tuple ,_a : Any ,_a : List[Any] ,_a : int ):
'''simple docstring'''
_a : str = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_a : int = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : List[Any] ,_a : str ,_a : Tuple ,_a : Dict ):
'''simple docstring'''
_a : Tuple = self.type_sequence_label_size
_a : int = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = model(_a ,labels=_a )
print(F"""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print(F"""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a : int = 1
_a : Optional[Any] = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = self.prepare_config_and_inputs()
_a, _a, _a : int = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[str] = ViTMSNModelTester(self )
_a : Optional[int] = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_a )
_a : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : List[Any] = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self : int ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(2 )
_a : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_a )
_a : List[str] = self.default_image_processor
_a : int = prepare_img()
_a : Tuple = image_processor(images=_a ,return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[int] = model(**_a )
# verify the logits
_a : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_a )
_a : List[Any] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1E-4 ) )
| 5 | 1 |
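# The tester above derives the ViT sequence length as
# (image_size // patch_size) ** 2 patches plus one [CLS] token; in numbers,
# with the tester defaults image_size=30 and patch_size=2:
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
print(num_patches, num_patches + 1)  # 225 226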
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = """▁"""
__lowerCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
__lowerCAmelCase = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
__lowerCAmelCase = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES
__UpperCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple ,_a : str ,_a : Any="<s>" ,_a : Optional[Any]="</s>" ,_a : Union[str, Any]="</s>" ,_a : Union[str, Any]="<s>" ,_a : Optional[int]="<unk>" ,_a : Union[str, Any]="<pad>" ,_a : int="<mask>" ,_a : Optional[Dict[str, Any]] = None ,**_a : int ,):
'''simple docstring'''
_a : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
_a : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
_a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
_a : Optional[int] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_a : List[str] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_a : List[str] = 1
_a : Tuple = len(self.sp_model ) + self.fairseq_offset
_a : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Dict ):
'''simple docstring'''
_a : List[Any] = self.__dict__.copy()
_a : Optional[Any] = None
_a : Dict = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Any ,_a : int ):
'''simple docstring'''
_a : Optional[Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_a : str = {}
_a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowercase ( self : List[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : List[str] = [self.cls_token_id]
_a : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def __lowercase ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : List[Any] = [self.sep_token_id]
_a : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __lowercase ( self : int ):
'''simple docstring'''
_a : Union[str, Any] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self : Tuple ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def __lowercase ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_a : Optional[Any] = self.sp_model.PieceToId(_a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowercase ( self : List[str] ,_a : int ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowercase ( self : Dict ,_a : Union[str, Any] ):
'''simple docstring'''
_a : Dict = ''.join(_a ).replace(_a ,' ' ).strip()
return out_string
def __lowercase ( self : Any ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Any = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,'wb' ) as fi:
_a : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 5 |
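# The fairseq/SPM alignment above pins four control tokens and shifts every
# SentencePiece id by 1; a toy illustration with a made-up piece table
# (the real one comes from the SentencePiece model):
fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
fairseq_offset = 1

def token_to_id(token, spm_piece_to_id):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id.get(token, 0)
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids['<unk>']

print(token_to_id(',', {',': 3}))  # 4, matching the alignment comment above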
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ (__a : str = "https://www.worldometers.info/coronavirus" ):
"""simple docstring"""
_a : List[str] = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
_a : Dict = soup.findAll('h1' )
_a : Union[str, Any] = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(__a , __a )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 1 |
'''simple docstring'''
def UpperCAmelCase_ (__a : str , __a : Any ):
"""simple docstring"""
_a : List[str] = ''
for i in table:
res += inp[i - 1]
return res
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
return data[1:] + data[0]
def UpperCAmelCase_ (__a : Dict , __a : Optional[int] ):
"""simple docstring"""
_a : Dict = ''
for i in range(len(__a ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def UpperCAmelCase_ (__a : List[str] , __a : Any ):
"""simple docstring"""
_a : Optional[Any] = int('0b' + data[0] + data[-1] , 2 )
_a : Dict = int('0b' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def UpperCAmelCase_ (__a : List[str] , __a : List[str] , __a : Optional[int] , __a : str , __a : Optional[int] ):
"""simple docstring"""
_a : Union[str, Any] = message[:4]
_a : int = message[4:]
_a : Union[str, Any] = apply_table(__a , __a )
_a : List[Any] = xor(__a , __a )
_a : int = apply_sbox(__a , temp[:4] ) # noqa: E741
_a : int = apply_sbox(__a , temp[4:] )
_a : Dict = '0' * (2 - len(__a )) + l # noqa: E741
_a : Optional[Any] = '0' * (2 - len(__a )) + r
_a : Tuple = apply_table(l + r , __a )
_a : List[str] = xor(__a , __a )
return temp + right
if __name__ == "__main__":
__lowerCAmelCase = input("""Enter 10 bit key: """)
__lowerCAmelCase = input("""Enter 8 bit message: """)
__lowerCAmelCase = [6, 3, 7, 4, 8, 5, 1_0, 9]
__lowerCAmelCase = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
__lowerCAmelCase = [2, 4, 3, 1]
__lowerCAmelCase = [2, 6, 3, 1, 4, 8, 5, 7]
__lowerCAmelCase = [4, 1, 3, 5, 7, 2, 8, 6]
__lowerCAmelCase = [4, 1, 2, 3, 2, 3, 4, 1]
__lowerCAmelCase = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__lowerCAmelCase = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__lowerCAmelCase = apply_table(key, paa_table)
__lowerCAmelCase = temp[:5]
__lowerCAmelCase = temp[5:]
__lowerCAmelCase = left_shift(left)
__lowerCAmelCase = left_shift(right)
__lowerCAmelCase = apply_table(left + right, pa_table)
__lowerCAmelCase = left_shift(left)
__lowerCAmelCase = left_shift(right)
__lowerCAmelCase = left_shift(left)
__lowerCAmelCase = left_shift(right)
__lowerCAmelCase = apply_table(left + right, pa_table)
# encryption
__lowerCAmelCase = apply_table(message, IP)
__lowerCAmelCase = function(expansion, sa, sa, keya, temp)
__lowerCAmelCase = temp[4:] + temp[:4]
__lowerCAmelCase = function(expansion, sa, sa, keya, temp)
__lowerCAmelCase = apply_table(temp, IP_inv)
print("""Cipher text is:""", CT)
# decryption
__lowerCAmelCase = apply_table(CT, IP)
__lowerCAmelCase = function(expansion, sa, sa, keya, temp)
__lowerCAmelCase = temp[4:] + temp[:4]
__lowerCAmelCase = function(expansion, sa, sa, keya, temp)
__lowerCAmelCase = apply_table(temp, IP_inv)
print("""Plain text after decypting is:""", PT)
| 5 |
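# The S-DES building blocks above in isolation: apply_table is a 1-indexed
# permutation and xor is a bitwise XOR over bit strings.
def apply_table(inp, table):
    return ''.join(inp[i - 1] for i in table)

def xor(a, b):
    return ''.join('0' if x == y else '1' for x, y in zip(a, b))

print(apply_table('10100000', [2, 6, 3, 1, 4, 8, 5, 7]))  # 00110000
print(xor('1010', '0110'))  # 1100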
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase = """docs/source/en/_toctree.yml"""
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Any = defaultdict(__a )
for doc in model_doc:
counts[doc["local"]] += 1
_a : List[str] = [key for key, value in counts.items() if value > 1]
_a : str = []
for duplicate_key in duplicates:
_a : Union[str, Any] = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(__a , key=lambda s: s["title"].lower() )
def UpperCAmelCase_ (__a : Optional[int]=False ):
"""simple docstring"""
with open(__a , encoding='utf-8' ) as f:
_a : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
_a : Union[str, Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_a : Union[str, Any] = content[api_idx]['sections']
# Then to the model doc
_a : List[str] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
_a : List[str] = api_doc[model_idx]['sections']
_a : List[Any] = [(idx, section) for idx, section in enumerate(__a ) if 'sections' in section]
_a : Tuple = False
for idx, modality_doc in modalities_docs:
_a : List[Any] = modality_doc['sections']
_a : Any = clean_model_doc_toc(__a )
if old_modality_doc != new_modality_doc:
_a : Union[str, Any] = True
if overwrite:
_a : str = new_modality_doc
if diff:
if overwrite:
_a : Dict = model_doc
_a : Dict = api_doc
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__lowerCAmelCase = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
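# Editor's hedged worked example, assuming the conventional name
# clean_model_doc_toc for the dedup/sort helper above:
#   clean_model_doc_toc([
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/albert", "title": "ALBERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#   ])
# keeps one BERT entry and sorts by lowercased title, returning
#   [{"local": "model_doc/albert", "title": "ALBERT"},
#    {"local": "model_doc/bert", "title": "BERT"}]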
| 5 | 1 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
_a : typing.Counter[int] = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(__a , max_perimeter + 1 ):
_a : Any = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__a ):
_a : Tuple = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def UpperCAmelCase_ (__a : int = 1_0_0_0 ):
"""simple docstring"""
_a : Any = pythagorean_triple(__a )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
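# Editor's hedged worked check: Project Euler 39's classic data point is
# perimeter 120, which admits exactly three integer right triangles,
# (20, 48, 52), (24, 45, 51) and (30, 40, 50).
assert sum(
    1
    for a in range(1, 120)
    for b in range(a, 120)
    if (c := 120 - a - b) > b and a * a + b * b == c * c
) == 3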
| 5 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) != 3_2:
raise ValueError('Input must be of length 32' )
_a : Any = b''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '08x' )[-8:]
_a : str = b''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : List[Any] = b''
for char in message:
bit_string += format(__a , '08b' ).encode('utf-8' )
_a : int = format(len(__a ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__a ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) % 5_1_2 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(__a ) , 5_1_2 ):
_a : List[Any] = bit_string[pos : pos + 5_1_2]
_a : str = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '032b' )
_a : int = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__a , 2 )
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
return (a + b) % 2**3_2
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : str = preprocess(__a )
_a : Optional[int] = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
_a : int = 0x67_45_23_01
_a : Union[str, Any] = 0xEF_CD_AB_89
_a : str = 0x98_BA_DC_FE
_a : List[Any] = 0x10_32_54_76
    _a : Optional[int] = [
        7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2,
        5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0,
        4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3,
        6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__a ):
_a : Union[str, Any] = aa
_a : List[Any] = ba
_a : List[Any] = ca
_a : Dict = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a : Optional[int] = d ^ (b & (c ^ d))
_a : Optional[Any] = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a : Optional[Any] = c ^ (d & (b ^ c))
_a : Dict = (5 * i + 1) % 1_6
elif i <= 4_7:
_a : Optional[Any] = b ^ c ^ d
_a : Dict = (3 * i + 5) % 1_6
else:
_a : int = c ^ (b | not_aa(__a ))
_a : List[str] = (7 * i) % 1_6
_a : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**3_2
_a : Union[str, Any] = d
_a : Tuple = c
_a : Optional[int] = b
_a : Union[str, Any] = sum_aa(__a , left_rotate_aa(__a , shift_amounts[i] ) )
# Add hashed chunk to running total
_a : Any = sum_aa(__a , __a )
_a : Dict = sum_aa(__a , __a )
_a : Union[str, Any] = sum_aa(__a , __a )
_a : str = sum_aa(__a , __a )
_a : Optional[Any] = reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
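# Editor's hedged sanity check: the routine above is an educational MD5
# reimplementation, so its hex digests should agree with hashlib's reference
# values, e.g. for the empty message and for b"a":
import hashlib
assert hashlib.md5(b'').hexdigest() == 'd41d8cd98f00b204e9800998ecf8427e'
assert hashlib.md5(b'a').hexdigest() == '0cc175b9c0f1b6a831c399e269772661'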
| 5 | 1 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__lowerCAmelCase = pd.read_csv("""sample_data.csv""", header=None)
__lowerCAmelCase = df.shape[:1][0]
# If you're using some other dataset input the target column
__lowerCAmelCase = df.iloc[:, 1:2]
__lowerCAmelCase = actual_data.values.reshape(len_data, 1)
__lowerCAmelCase = MinMaxScaler().fit_transform(actual_data)
__lowerCAmelCase = 1_0
__lowerCAmelCase = 5
__lowerCAmelCase = 2_0
__lowerCAmelCase = len_data - periods * look_back
__lowerCAmelCase = actual_data[:division]
__lowerCAmelCase = actual_data[division - look_back :]
__lowerCAmelCase , __lowerCAmelCase = [], []
__lowerCAmelCase , __lowerCAmelCase = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
__lowerCAmelCase = np.array(train_x)
__lowerCAmelCase = np.array(test_x)
__lowerCAmelCase = np.array([list(i.ravel()) for i in train_y])
__lowerCAmelCase = np.array([list(i.ravel()) for i in test_y])
__lowerCAmelCase = Sequential()
model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(6_4, input_shape=(1_2_8, 1)))
model.add(Dense(forward_days))
model.compile(loss="""mean_squared_error""", optimizer="""adam""")
__lowerCAmelCase = model.fit(
x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
)
__lowerCAmelCase = model.predict(x_test)
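# Editor's hedged follow-up sketch: model.predict returns values in the
# MinMax-scaled space. To compare against real prices the fitted scaler would
# have to be kept and inverted, e.g. (variable names assumed for illustration):
#   scaler = MinMaxScaler().fit(raw_values)
#   unscaled = scaler.inverse_transform(result.reshape(-1, 1))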
| 5 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCAmelCase_ (__a : str , __a : Dict=0.999 , __a : List[str]="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__a : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__a : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_a : Tuple = []
for i in range(__a ):
_a : Union[str, Any] = i / num_diffusion_timesteps
_a : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__a ) / alpha_bar_fn(__a ) , __a ) )
return torch.tensor(__a , dtype=torch.floataa )
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Dict = 2
@register_to_config
def __init__( self : str ,_a : int = 1000 ,_a : float = 0.0_0085 ,_a : float = 0.012 ,_a : str = "linear" ,_a : Optional[Union[np.ndarray, List[float]]] = None ,_a : str = "epsilon" ,_a : Optional[bool] = False ,_a : Optional[bool] = False ,_a : float = 1.0 ,_a : str = "linspace" ,_a : int = 0 ,):
'''simple docstring'''
if trained_betas is not None:
_a : List[str] = torch.tensor(_a ,dtype=torch.floataa )
elif beta_schedule == "linear":
_a : Tuple = torch.linspace(_a ,_a ,_a ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a : List[str] = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_a ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a : Dict = betas_for_alpha_bar(_a ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
_a : Tuple = betas_for_alpha_bar(_a ,alpha_transform_type='exp' )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
_a : Optional[Any] = 1.0 - self.betas
_a : Optional[int] = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(_a ,_a ,_a )
_a : Optional[int] = use_karras_sigmas
def __lowercase ( self : Any ,_a : Union[str, Any] ,_a : Optional[Any]=None ):
'''simple docstring'''
if schedule_timesteps is None:
_a : List[Any] = self.timesteps
_a : Dict = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a : int = 1 if len(_a ) > 1 else 0
else:
_a : str = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
_a : str = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Union[float, torch.FloatTensor] ,):
'''simple docstring'''
_a : List[Any] = self.index_for_timestep(_a )
_a : Tuple = self.sigmas[step_index]
_a : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowercase ( self : Any ,_a : int ,_a : Union[str, torch.device] = None ,_a : Optional[int] = None ,):
'''simple docstring'''
_a : Optional[Any] = num_inference_steps
_a : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a : Optional[Any] = np.linspace(0 ,num_train_timesteps - 1 ,_a ,dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : int = (np.arange(0 ,_a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a : Any = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : Union[str, Any] = (np.arange(_a ,0 ,-step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_a : Tuple = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a : Union[str, Any] = np.log(_a )
_a : str = np.interp(_a ,np.arange(0 ,len(_a ) ) ,_a )
if self.config.use_karras_sigmas:
_a : List[Any] = self._convert_to_karras(in_sigmas=_a ,num_inference_steps=self.num_inference_steps )
_a : Dict = np.array([self._sigma_to_t(_a ,_a ) for sigma in sigmas] )
_a : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a : Union[str, Any] = torch.from_numpy(_a ).to(device=_a )
_a : Any = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_a : List[Any] = torch.from_numpy(_a )
_a : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_a ).startswith('mps' ):
# mps does not support float64
_a : Tuple = timesteps.to(_a ,dtype=torch.floataa )
else:
_a : Dict = timesteps.to(device=_a )
# empty dt and derivative
_a : Tuple = None
_a : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a : Union[str, Any] = defaultdict(_a )
def __lowercase ( self : str ,_a : Dict ,_a : Dict ):
'''simple docstring'''
_a : Optional[int] = np.log(_a )
# get distribution
_a : Union[str, Any] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_a : List[Any] = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_a : Tuple = low_idx + 1
_a : Union[str, Any] = log_sigmas[low_idx]
_a : Optional[Any] = log_sigmas[high_idx]
# interpolate sigmas
_a : Optional[Any] = (low - log_sigma) / (low - high)
_a : List[str] = np.clip(_a ,0 ,1 )
# transform interpolation to time range
_a : Union[str, Any] = (1 - w) * low_idx + w * high_idx
_a : List[str] = t.reshape(sigma.shape )
return t
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Tuple ):
'''simple docstring'''
_a : float = in_sigmas[-1].item()
_a : float = in_sigmas[0].item()
_a : Tuple = 7.0 # 7.0 is the value used in the paper
_a : str = np.linspace(0 ,1 ,_a )
_a : Optional[Any] = sigma_min ** (1 / rho)
_a : Union[str, Any] = sigma_max ** (1 / rho)
_a : str = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return self.dt is None
def __lowercase ( self : int ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : Union[float, torch.FloatTensor] ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : bool = True ,):
'''simple docstring'''
_a : Union[str, Any] = self.index_for_timestep(_a )
# advance index counter by 1
_a : Any = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a : Tuple = self.sigmas[step_index]
_a : int = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_a : List[str] = self.sigmas[step_index - 1]
_a : List[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a : Optional[int] = 0
_a : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a : Dict = sigma_hat if self.state_in_first_order else sigma_next
_a : Optional[int] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a : List[Any] = sigma_hat if self.state_in_first_order else sigma_next
_a : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_a : Union[str, Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
_a : Optional[int] = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a : Optional[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a : Any = sigma_next - sigma_hat
# store for 2nd order step
_a : int = derivative
_a : List[str] = dt
_a : Union[str, Any] = sample
else:
# 2. 2nd order / Heun's method
_a : Dict = (sample - pred_original_sample) / sigma_next
_a : Tuple = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_a : Optional[Any] = self.dt
_a : Union[str, Any] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_a : List[Any] = None
_a : Union[str, Any] = None
_a : Dict = None
_a : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __lowercase ( self : Optional[int] ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,):
'''simple docstring'''
_a : str = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
_a : Dict = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_a : Optional[Any] = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_a : int = self.timesteps.to(original_samples.device )
_a : Optional[Any] = timesteps.to(original_samples.device )
_a : Any = [self.index_for_timestep(_a ,_a ) for t in timesteps]
_a : Optional[int] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a : Optional[Any] = sigma.unsqueeze(-1 )
_a : Any = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
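# Editor's hedged usage sketch: this class matches diffusers' HeunDiscreteScheduler;
# a typical denoising loop would look like the following (`model` is assumed):
#   scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(25)
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample
# Each Heun step costs two model evaluations, which is reflected in the
# repeat_interleave(2) of sigmas/timesteps inside set_timesteps above.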
| 5 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = '''convnextv2'''
def __init__( self : Optional[int] ,_a : Dict=3 ,_a : Any=4 ,_a : int=4 ,_a : Tuple=None ,_a : List[Any]=None ,_a : List[str]="gelu" ,_a : List[str]=0.02 ,_a : List[str]=1E-12 ,_a : Optional[int]=0.0 ,_a : Any=224 ,_a : Optional[Any]=None ,_a : Any=None ,**_a : Optional[Any] ,):
'''simple docstring'''
super().__init__(**_a )
_a : Union[str, Any] = num_channels
_a : str = patch_size
_a : int = num_stages
_a : int = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
_a : Any = [3, 3, 9, 3] if depths is None else depths
_a : Union[str, Any] = hidden_act
_a : int = initializer_range
_a : Any = layer_norm_eps
_a : str = drop_path_rate
_a : Any = image_size
_a : Optional[Any] = ['stem'] + [F"""stage{idx}""" for idx in range(1 ,len(self.depths ) + 1 )]
_a, _a : Optional[Any] = get_aligned_output_features_output_indices(
out_features=_a ,out_indices=_a ,stage_names=self.stage_names )
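# Editor's hedged usage sketch, assuming the upstream name ConvNextV2Config:
#   config = ConvNextV2Config()      # defaults: depths [3, 3, 9, 3],
#                                    # hidden_sizes [96, 192, 384, 768]
#   config = ConvNextV2Config(out_features=["stage2", "stage4"])  # backbone taps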
| 5 |
'''simple docstring'''
import qiskit
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
_a : Any = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_a : List[Any] = qiskit.QuantumCircuit(__a , __a )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
_a : Tuple = qiskit.execute(__a , __a , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(__a )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
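# Editor's hedged usage note: no gates are applied before the measurement, so
# the qubit stays in |0> and the histogram is deterministic:
#   single_qubit_measure(1, 1)  ->  {'0': 1000}   (all 1000 shots read 0)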
| 5 | 1 |
'''simple docstring'''
def UpperCAmelCase_ (__a : str , __a : str ):
"""simple docstring"""
_a : int = len(__a ) + 1
_a : List[str] = len(__a ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_a : Optional[int] = [[0 for i in range(__a )] for j in range(__a )]
# since string of zero length match pattern of zero length
_a : str = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , __a ):
_a : Optional[Any] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , __a ):
_a : Dict = dp[0][j - 2] if pattern[j - 1] == '*' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , __a ):
for j in range(1 , __a ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_a : Tuple = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
_a : List[str] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_a : int = dp[i - 1][j]
else:
_a : Any = 0
else:
_a : Optional[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__lowerCAmelCase = """aab"""
__lowerCAmelCase = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 5 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_a : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_a : List[str] = 'xvjiarui/stable-diffusion-2-inpainting'
_a, _a : str = FlaxStableDiffusionInpaintPipeline.from_pretrained(_a ,safety_checker=_a )
_a : str = 'Face of a yellow cat, high resolution, sitting on a park bench'
_a : int = jax.random.PRNGKey(0 )
_a : Tuple = 50
_a : Any = jax.device_count()
_a : Dict = num_samples * [prompt]
_a : Optional[Any] = num_samples * [init_image]
_a : str = num_samples * [mask_image]
_a, _a, _a : Optional[Any] = pipeline.prepare_inputs(_a ,_a ,_a )
# shard inputs and rng
_a : Optional[Any] = replicate(_a )
_a : str = jax.random.split(_a ,jax.device_count() )
_a : Dict = shard(_a )
_a : int = shard(_a )
_a : int = shard(_a )
_a : Union[str, Any] = pipeline(
_a ,_a ,_a ,_a ,_a ,_a ,jit=_a )
_a : Union[str, Any] = output.images.reshape(_a ,512 ,512 ,3 )
_a : Union[str, Any] = images[0, 253:256, 253:256, -1]
_a : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_a : Union[str, Any] = jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 5 | 1 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ['''image_processor''', '''tokenizer''']
__UpperCAmelCase : Union[str, Any] = '''BlipImageProcessor'''
__UpperCAmelCase : Any = '''AutoTokenizer'''
def __init__( self : Optional[Any] ,_a : Any ,_a : List[str] ,_a : Optional[int] ):
'''simple docstring'''
super().__init__(_a ,_a )
# add QFormer tokenizer
_a : Dict = qformer_tokenizer
def __call__( self : List[str] ,_a : ImageInput = None ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
_a : List[str] = BatchFeature()
if text is not None:
_a : int = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_token_type_ids=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
encoding.update(_a )
_a : str = self.qformer_tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_token_type_ids=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
_a : Optional[Any] = qformer_text_encoding.pop('input_ids' )
_a : Union[str, Any] = qformer_text_encoding.pop('attention_mask' )
if images is not None:
_a : Optional[int] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def __lowercase ( self : str ,*_a : List[Any] ,**_a : str ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def __lowercase ( self : int ,*_a : Tuple ,**_a : Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Union[str, Any] = self.tokenizer.model_input_names
_a : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def __lowercase ( self : List[str] ,_a : Union[str, Any] ,**_a : Tuple ):
'''simple docstring'''
if os.path.isfile(_a ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(_a ,exist_ok=_a )
_a : List[Any] = os.path.join(_a ,'qformer_tokenizer' )
self.qformer_tokenizer.save_pretrained(_a )
return super().save_pretrained(_a ,**_a )
@classmethod
def __lowercase ( cls : Union[str, Any] ,_a : Optional[int] ,**_a : int ):
'''simple docstring'''
_a : List[str] = AutoTokenizer.from_pretrained(_a ,subfolder='qformer_tokenizer' )
_a : str = cls._get_arguments_from_pretrained(_a ,**_a )
args.append(_a )
return cls(*_a )
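# Editor's hedged usage sketch (model id assumed for illustration):
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is in the picture?", return_tensors="pt")
# The BatchFeature carries input_ids/attention_mask from the main tokenizer;
# in the upstream library the QFormer encoding lands under the
# qformer_input_ids / qformer_attention_mask keys.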
| 5 |
'''simple docstring'''
def UpperCAmelCase_ (__a : str , __a : str ):
"""simple docstring"""
_a : int = len(__a ) + 1
_a : List[str] = len(__a ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_a : Optional[int] = [[0 for i in range(__a )] for j in range(__a )]
# since string of zero length match pattern of zero length
_a : str = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , __a ):
_a : Optional[Any] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , __a ):
_a : Dict = dp[0][j - 2] if pattern[j - 1] == '*' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , __a ):
for j in range(1 , __a ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_a : Tuple = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
_a : List[str] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_a : int = dp[i - 1][j]
else:
_a : Any = 0
else:
_a : Optional[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__lowerCAmelCase = """aab"""
__lowerCAmelCase = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 5 | 1 |
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def UpperCAmelCase_ (__a : Iterable[str] , __a : int ):
"""simple docstring"""
_a : str = iter(__a )
while True:
_a : Tuple = tuple(itertools.islice(__a , __a ) )
if not chunk:
return
yield chunk
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : int = ''.join([c.upper() for c in dirty if c in string.ascii_letters] )
_a : str = ''
if len(__a ) < 2:
return dirty
for i in range(len(__a ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(__a ) & 1:
clean += "X"
return clean
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Union[str, Any] = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
_a : Optional[int] = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(__a )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(__a )
return table
def UpperCAmelCase_ (__a : str , __a : str ):
"""simple docstring"""
_a : Union[str, Any] = generate_table(__a )
_a : int = prepare_input(__a )
_a : int = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(__a , 2 ):
_a, _a : Dict = divmod(table.index(__a ) , 5 )
_a, _a : str = divmod(table.index(__a ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def UpperCAmelCase_ (__a : str , __a : str ):
"""simple docstring"""
_a : Dict = generate_table(__a )
_a : Optional[int] = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(__a , 2 ):
_a, _a : str = divmod(table.index(__a ) , 5 )
_a, _a : Optional[int] = divmod(table.index(__a ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
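# Editor's hedged worked example, assuming the conventional names encode/decode
# for the two routines above and the classic textbook key:
#   encode("Hide the gold in the tree stump", "playfair example")
# pads the double "EE" with an X and yields "BMODZBXDNABEKUDMUIXMMOUVIF";
# decoding with the same key returns "HIDETHEGOLDINTHETREXESTUMP"
# (the padding X is not stripped).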
| 5 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = BlenderbotSmallTokenizer
__UpperCAmelCase : Tuple = False
def __lowercase ( self : List[Any] ):
'''simple docstring'''
super().setUp()
_a : List[str] = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
_a : Tuple = dict(zip(_a ,range(len(_a ) ) ) )
_a : List[Any] = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
_a : List[Any] = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
_a : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_a : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(_a ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(_a ) )
def __lowercase ( self : List[Any] ,**_a : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : Tuple ,_a : int ):
'''simple docstring'''
_a : Optional[Any] = 'adapt act apte'
_a : Dict = 'adapt act apte'
return input_text, output_text
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
_a : Union[str, Any] = 'adapt act apte'
_a : Dict = ['adapt', 'act', 'ap@@', 'te']
_a : Tuple = tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
_a : List[str] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_a : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : str = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
_a : Union[str, Any] = 'I am a small frog.'
_a : int = tok([src_text] ,padding=_a ,truncation=_a )['input_ids']
_a : str = tok.batch_decode(_a ,skip_special_tokens=_a ,clean_up_tokenization_spaces=_a )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
_a : Union[str, Any] = 'I am a small frog .'
_a : Optional[Any] = '.'
_a : Optional[Any] = tok(_a )['input_ids']
_a : Union[str, Any] = tok(_a )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 5 | 1 |
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
__lowerCAmelCase = getLogger(__name__)
def UpperCAmelCase_ (__a : List[str] , __a : str , __a : str , __a : int = 8 , __a : int = 1_0_2_4 , __a : Optional[Any]="val" , __a : str=None , __a : int=False , __a : int="summarization" , __a : str=None , __a : str=1 , __a : Dict = None , __a : Union[str, Any]="" , **__a : List[Any] , ):
"""simple docstring"""
_a : Union[str, Any] = str(__a )
assert local_rank is not None
torch.distributed.init_process_group(backend='nccl' , rank=__a )
_a : List[Any] = Path(__a )
_a : Optional[Any] = save_dir.joinpath(f"""rank_{local_rank}_output.json""" )
torch.cuda.set_device(__a )
_a : int = AutoModelForSeqaSeqLM.from_pretrained(__a ).cuda()
if fpaa:
_a : List[Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(__a , __a ) # update config with task specific params
_a : str = generate_kwargs.pop('num_beams' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
_a : Optional[Any] = num_return_sequences
_a : int = AutoTokenizer.from_pretrained(__a )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
if max_source_length is None:
_a : Dict = tokenizer.model_max_length
if prefix is None:
_a : Any = prefix or getattr(model.config , 'prefix' , '' ) or ''
_a : List[str] = SeqaSeqDataset(
__a , __a , __a , max_target_length=1_0_2_4 , type_path=__a , n_obs=__a , prefix=__a , **__a , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
_a : List[str] = ds.make_sortish_sampler(__a , distributed=__a , add_extra_examples=__a , shuffle=__a )
_a : List[Any] = DataLoader(__a , sampler=__a , batch_size=__a , collate_fn=ds.collate_fn )
_a : Optional[Any] = []
for batch in tqdm(__a ):
_a : List[str] = model.generate(
input_ids=batch['input_ids'].to(model.device ) , attention_mask=batch['attention_mask'].to(model.device ) , num_return_sequences=__a , num_beams=__a , **__a , )
_a : Dict = tokenizer.batch_decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
_a : Any = batch['ids']
if num_return_sequences > 1:
_a : Tuple = chunks(__a , __a ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(__a ):
results.append({'pred': pred, 'id': ids[i].item()} )
save_json(__a , __a )
return results, sampler.num_replicas
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Tuple = argparse.ArgumentParser(
epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate' )
parser.add_argument('--data_dir' , type=__a , help='like cnn_dm/test.source' )
parser.add_argument(
'--model_name' , type=__a , help='like facebook/bart-large-cnn,t5-base, etc.' , default='sshleifer/distilbart-xsum-12-3' , )
parser.add_argument('--save_dir' , type=__a , help='where to save' , default='tmp_gen' )
parser.add_argument('--max_source_length' , type=__a , default=__a )
parser.add_argument(
'--type_path' , type=__a , default='test' , help='which subset to evaluate typically train/val/test' )
parser.add_argument('--task' , type=__a , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=__a , default=8 , required=__a , help='batch size' )
parser.add_argument(
'--local_rank' , type=__a , default=-1 , required=__a , help='should be passed by distributed.launch' )
parser.add_argument(
'--n_obs' , type=__a , default=__a , required=__a , help='How many observations. Defaults to all.' )
parser.add_argument(
'--num_return_sequences' , type=__a , default=1 , required=__a , help='How many sequences to return' )
parser.add_argument(
'--sync_timeout' , type=__a , default=6_0_0 , required=__a , help='How long should master process wait for other processes to finish.' , )
parser.add_argument('--src_lang' , type=__a , default=__a , required=__a )
parser.add_argument('--tgt_lang' , type=__a , default=__a , required=__a )
parser.add_argument(
'--prefix' , type=__a , required=__a , default=__a , help='will be added to the begininng of src examples' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--debug' , action='store_true' )
_a : Any = time.time()
_a, _a : Optional[int] = parser.parse_known_args()
_a : List[Any] = parse_numeric_n_bool_cl_kwargs(__a )
if generate_kwargs and args.local_rank <= 0:
print(f"""parsed the following generate kwargs: {generate_kwargs}""" )
_a : int = Path(args.save_dir + '_tmp' )
Path(__a ).mkdir(exist_ok=__a ) # this handles locking.
_a : List[Any] = list(json_save_dir.glob('rank_*.json' ) )
if intermediate_files:
raise ValueError(f"""Found files at {json_save_dir} please move or remove them.""" )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
_a : Optional[Any] = {}
if args.src_lang is not None:
_a : Any = args.src_lang
if args.tgt_lang is not None:
_a : Optional[int] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=__a )
_a, _a : Optional[Any] = eval_data_dir(
args.data_dir , __a , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__a , **__a , )
if args.local_rank <= 0:
_a : str = Path(args.save_dir )
save_dir.mkdir(exist_ok=__a )
_a : Optional[int] = gather_results_from_each_node(__a , __a , args.sync_timeout )
_a : Optional[int] = combine_partial_results(__a )
if args.num_return_sequences > 1:
_a : List[Any] = save_dir.joinpath('pseudolabel_results.json' )
print(f"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" )
save_json(__a , __a )
return
_a : List[str] = Path(args.data_dir ).joinpath(args.type_path + '.target' )
with open(__a ) as f:
_a : List[Any] = [x.rstrip() for x in f.readlines()][: len(__a )]
# Calculate metrics, save metrics, and save _generations.txt
_a : List[str] = 'translation' in args.task
_a : Optional[int] = calculate_bleu if calc_bleu else calculate_rouge
_a : Dict = 'bleu' if calc_bleu else 'rouge'
_a : Dict = score_fn(__a , __a )
_a : str = len(__a )
_a : Tuple = time.time() - start_time
_a : List[str] = round(runtime / metrics['n_obs'] , 4 )
_a : Dict = num_replicas
# TODO(@stas00): add whatever metadata to metrics
_a : Optional[Any] = save_dir.joinpath(f"""{args.type_path}_{metric_name}.json""" )
save_json(__a , __a , indent=__a )
print(__a )
write_txt_file(__a , save_dir.joinpath(f"""{args.type_path}_generations.txt""" ) )
if args.debug:
write_txt_file(__a , save_dir.joinpath(f"""{args.type_path}.target""" ) )
else:
shutil.rmtree(__a )
def UpperCAmelCase_ (__a : List[Any] ):
"""simple docstring"""
_a : int = []
for partial_result in partial_results:
records.extend(__a )
_a : Optional[int] = sorted(__a , key=lambda x: x["id"] )
_a : Any = [x['pred'] for x in records]
return preds
def UpperCAmelCase_ (__a : int , __a : Optional[int] , __a : Any ):
"""simple docstring"""
_a : List[Any] = time.time()
logger.info('waiting for all nodes to finish' )
_a : List[Any] = None
while (time.time() - start_wait) < timeout:
_a : Optional[int] = list(save_dir.glob('rank_*.json' ) )
if len(__a ) < num_replicas:
continue
try:
# make sure all json files are fully saved
_a : Optional[int] = lmap(__a , __a )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError('Rank 0 gave up on waiting for other processes' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
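# Editor's hedged usage sketch: the script is meant to be launched once per GPU
# via torch.distributed (file name assumed for illustration):
#   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 --data_dir xsum \
#       --save_dir tmp_gen --bs 16 --fp16
# Each rank writes rank_<i>_output.json; rank 0 waits, gathers and scores them.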
| 5 |
'''simple docstring'''
__lowerCAmelCase = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
__lowerCAmelCase = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = 'Morse code here!'
print(__a )
_a : Tuple = encrypt(__a )
print(__a )
_a : str = decrypt(__a )
print(__a )
if __name__ == "__main__":
main()
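# Editor's hedged worked example, using the encrypt/decrypt names called in
# main() for the two routines above:
#   encrypt("SOS")          ->  "... --- ..."
#   decrypt("... --- ...")  ->  "SOS"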
| 5 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCAmelCase = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def UpperCAmelCase_ (__a : Union[str, Any] , __a : List[Any] , __a : Any=8 ):
"""simple docstring"""
_a : Tuple = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_a : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[Any] ,_a : UNetaDConditionModel ,_a : DDPMScheduler ,_a : VQModel ,):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=_a ,scheduler=_a ,movq=_a ,)
_a : Any = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowercase ( self : List[Any] ,_a : List[Any] ,_a : Optional[int] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ):
'''simple docstring'''
if latents is None:
_a : int = randn_tensor(_a ,generator=_a ,device=_a ,dtype=_a )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
_a : List[str] = latents.to(_a )
_a : List[str] = latents * scheduler.init_noise_sigma
return latents
def __lowercase ( self : str ,_a : Tuple=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
_a : List[str] = torch.device(F"""cuda:{gpu_id}""" )
_a : str = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_a ,_a )
def __lowercase ( self : Optional[int] ,_a : Optional[Any]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('>=' ,'0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
_a : str = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' ,silence_dtype_warnings=_a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_a : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
_a, _a : int = cpu_offload_with_hook(_a ,_a ,prev_module_hook=_a )
# We'll offload the last model manually.
_a : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if not hasattr(self.unet ,'_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a ,'_hf_hook' )
and hasattr(module._hf_hook ,'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_a )
def __call__( self : List[str] ,_a : Union[torch.FloatTensor, List[torch.FloatTensor]] ,_a : Union[torch.FloatTensor, List[torch.FloatTensor]] ,_a : torch.FloatTensor ,_a : int = 512 ,_a : int = 512 ,_a : int = 100 ,_a : float = 4.0 ,_a : int = 1 ,_a : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[str] = "pil" ,_a : bool = True ,):
'''simple docstring'''
_a : List[Any] = self._execution_device
_a : Union[str, Any] = guidance_scale > 1.0
if isinstance(_a ,_a ):
_a : int = torch.cat(_a ,dim=0 )
if isinstance(_a ,_a ):
_a : Any = torch.cat(_a ,dim=0 )
if isinstance(_a ,_a ):
_a : Optional[int] = torch.cat(_a ,dim=0 )
_a : Optional[Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
_a : List[str] = image_embeds.repeat_interleave(_a ,dim=0 )
_a : Union[str, Any] = negative_image_embeds.repeat_interleave(_a ,dim=0 )
_a : int = hint.repeat_interleave(_a ,dim=0 )
_a : int = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=_a )
_a : str = torch.cat([hint, hint] ,dim=0 ).to(dtype=self.unet.dtype ,device=_a )
self.scheduler.set_timesteps(_a ,device=_a )
_a : int = self.scheduler.timesteps
_a : Any = self.movq.config.latent_channels
_a, _a : Union[str, Any] = downscale_height_and_width(_a ,_a ,self.movq_scale_factor )
# create initial latent
_a : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) ,image_embeds.dtype ,_a ,_a ,_a ,self.scheduler ,)
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
_a : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_a : Optional[Any] = {'image_embeds': image_embeds, 'hint': hint}
_a : Dict = self.unet(
sample=_a ,timestep=_a ,encoder_hidden_states=_a ,added_cond_kwargs=_a ,return_dict=_a ,)[0]
if do_classifier_free_guidance:
_a, _a : Union[str, Any] = noise_pred.split(latents.shape[1] ,dim=1 )
_a, _a : Union[str, Any] = noise_pred.chunk(2 )
_a, _a : int = variance_pred.chunk(2 )
_a : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_a : Dict = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_a, _a : Union[str, Any] = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_a : Optional[Any] = self.scheduler.step(
_a ,_a ,_a ,generator=_a ,)[0]
# post-processing
_a : Optional[Any] = self.movq.decode(_a ,force_not_quantize=_a )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_a : Dict = image * 0.5 + 0.5
_a : Dict = image.clamp(0 ,1 )
_a : Any = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
_a : List[str] = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
| 5 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-config' )
except HTTPError:
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('test-config' ,use_auth_token=self._token )
_a : Optional[Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a ,repo_id='test-config' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Tuple = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token )
_a : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='valid_org/test-config-org' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
_a : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'} )
_a : int = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" ,trust_remote_code=_a )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,'CustomConfig' )
self.assertEqual(new_config.attribute ,42 )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_a : int = c.n_embd + 1 # int
_a : str = c.resid_pdrop + 1.0 # float
_a : Dict = not c.scale_attn_weights # bool
_a : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(_a ,c.n_embd ,'mismatch for key: n_embd' )
self.assertEqual(_a ,c.resid_pdrop ,'mismatch for key: resid_pdrop' )
self.assertEqual(_a ,c.scale_attn_weights ,'mismatch for key: scale_attn_weights' )
self.assertEqual(_a ,c.summary_type ,'mismatch for key: summary_type' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : int = PretrainedConfig()
_a : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_a ,['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_a : Dict = [key for key, value in config_common_kwargs.items() if value == getattr(_a ,_a )]
if len(_a ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F""" {', '.join(_a )}.""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaises(_a ):
# config is in subfolder, the following should not work without specifying the subfolder
_a : List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_a : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ,subfolder='bert' )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[Any] = mock.Mock()
_a : Any = 500
_a : Any = {}
_a : Any = HTTPError
_a : List[Any] = {}
# Download this model to make sure it's in the cache.
_a : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' ,return_value=_a ) as mock_head:
_a : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = AutoConfig.from_pretrained('bert-base-cased' )
_a : List[str] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_a )
_a : str = 2
json.dump(configuration.to_dict() ,open(os.path.join(_a ,'config.4.0.0.json' ) ,'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_a : Tuple = ['config.42.0.0.json']
_a : int = 768
configuration.save_pretrained(_a )
shutil.move(os.path.join(_a ,'config.4.0.0.json' ) ,os.path.join(_a ,'config.42.0.0.json' ) )
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,768 )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
_a : Optional[int] = 'v4.0.0'
_a, _a : Tuple = new_transformers.models.auto.AutoConfig.from_pretrained(
_a ,return_unused_kwargs=_a )
self.assertEqual(new_configuration.hidden_size ,2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_a ,{} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_a : str = 'v3.0.0'
_a : Optional[Any] = old_transformers.models.auto.AutoConfig.from_pretrained(_a )
self.assertEqual(old_configuration.hidden_size ,768 )
| 5 | 1 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__lowerCAmelCase = logging.get_logger(__name__)
# General docstring
__lowerCAmelCase = """ResNetConfig"""
# Base docstring
__lowerCAmelCase = """microsoft/resnet-50"""
__lowerCAmelCase = [1, 2_0_4_8, 7, 7]
# Image classification docstring
__lowerCAmelCase = """microsoft/resnet-50"""
__lowerCAmelCase = """tiger cat"""
__lowerCAmelCase = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] ,_a : int ,_a : int ,_a : int = 3 ,_a : int = 1 ,_a : str = "relu" ):
'''simple docstring'''
super().__init__()
_a : str = nn.Convad(
_a ,_a ,kernel_size=_a ,stride=_a ,padding=kernel_size // 2 ,bias=_a )
_a : List[str] = nn.BatchNormad(_a )
_a : int = ACTaFN[activation] if activation is not None else nn.Identity()
def __lowercase ( self : Tuple ,_a : Tensor ):
'''simple docstring'''
_a : str = self.convolution(_a )
_a : Any = self.normalization(_a )
_a : Optional[Any] = self.activation(_a )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple ,_a : ResNetConfig ):
'''simple docstring'''
super().__init__()
_a : Dict = ResNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=7 ,stride=2 ,activation=config.hidden_act )
_a : int = nn.MaxPoolad(kernel_size=3 ,stride=2 ,padding=1 )
_a : Optional[int] = config.num_channels
def __lowercase ( self : List[str] ,_a : Tensor ):
'''simple docstring'''
_a : Optional[Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_a : Any = self.embedder(_a )
_a : List[str] = self.pooler(_a )
return embedding
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] ,_a : int ,_a : int ,_a : int = 2 ):
'''simple docstring'''
super().__init__()
_a : Union[str, Any] = nn.Convad(_a ,_a ,kernel_size=1 ,stride=_a ,bias=_a )
_a : Dict = nn.BatchNormad(_a )
def __lowercase ( self : Tuple ,_a : Tensor ):
'''simple docstring'''
_a : List[str] = self.convolution(_a )
_a : str = self.normalization(_a )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ,_a : int ,_a : int ,_a : int = 1 ,_a : str = "relu" ):
'''simple docstring'''
super().__init__()
_a : List[Any] = in_channels != out_channels or stride != 1
_a : Union[str, Any] = (
ResNetShortCut(_a ,_a ,stride=_a ) if should_apply_shortcut else nn.Identity()
)
_a : Tuple = nn.Sequential(
ResNetConvLayer(_a ,_a ,stride=_a ) ,ResNetConvLayer(_a ,_a ,activation=_a ) ,)
_a : str = ACTaFN[activation]
def __lowercase ( self : List[str] ,_a : int ):
'''simple docstring'''
_a : Optional[Any] = hidden_state
_a : List[str] = self.layer(_a )
_a : Dict = self.shortcut(_a )
hidden_state += residual
_a : List[Any] = self.activation(_a )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] ,_a : int ,_a : int ,_a : int = 1 ,_a : str = "relu" ,_a : int = 4 ):
'''simple docstring'''
super().__init__()
_a : Optional[int] = in_channels != out_channels or stride != 1
_a : List[str] = out_channels // reduction
_a : Optional[Any] = (
ResNetShortCut(_a ,_a ,stride=_a ) if should_apply_shortcut else nn.Identity()
)
_a : int = nn.Sequential(
ResNetConvLayer(_a ,_a ,kernel_size=1 ) ,ResNetConvLayer(_a ,_a ,stride=_a ) ,ResNetConvLayer(_a ,_a ,kernel_size=1 ,activation=_a ) ,)
_a : Any = ACTaFN[activation]
def __lowercase ( self : Tuple ,_a : int ):
'''simple docstring'''
_a : str = hidden_state
_a : Optional[int] = self.layer(_a )
_a : List[str] = self.shortcut(_a )
hidden_state += residual
_a : Dict = self.activation(_a )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] ,_a : ResNetConfig ,_a : int ,_a : int ,_a : int = 2 ,_a : int = 2 ,):
'''simple docstring'''
super().__init__()
_a : Tuple = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_a : Tuple = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(_a ,_a ,stride=_a ,activation=config.hidden_act ) ,*[layer(_a ,_a ,activation=config.hidden_act ) for _ in range(depth - 1 )] ,)
def __lowercase ( self : Dict ,_a : Tensor ):
'''simple docstring'''
_a : Tuple = input
for layer in self.layers:
_a : Dict = layer(_a )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] ,_a : ResNetConfig ):
'''simple docstring'''
super().__init__()
_a : List[str] = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
_a ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
_a : int = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_a ,config.depths[1:] ):
self.stages.append(ResNetStage(_a ,_a ,_a ,depth=_a ) )
def __lowercase ( self : str ,_a : Tensor ,_a : bool = False ,_a : bool = True ):
'''simple docstring'''
_a : Any = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_a : int = hidden_states + (hidden_state,)
_a : int = stage_module(_a )
if output_hidden_states:
_a : int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=_a ,hidden_states=_a ,)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ResNetConfig
__UpperCAmelCase : List[str] = '''resnet'''
__UpperCAmelCase : int = '''pixel_values'''
__UpperCAmelCase : int = True
def __lowercase ( self : Tuple ,_a : int ):
'''simple docstring'''
if isinstance(_a ,nn.Convad ):
nn.init.kaiming_normal_(module.weight ,mode='fan_out' ,nonlinearity='relu' )
elif isinstance(_a ,(nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def __lowercase ( self : str ,_a : int ,_a : List[str]=False ):
'''simple docstring'''
if isinstance(_a ,_a ):
_a : Any = value
__lowerCAmelCase = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__lowerCAmelCase = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , lowercase__ , )
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[Any] ,_a : Tuple ):
'''simple docstring'''
super().__init__(_a )
_a : Optional[Any] = config
_a : str = ResNetEmbeddings(_a )
_a : Optional[int] = ResNetEncoder(_a )
_a : Dict = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_a ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def __lowercase ( self : Optional[int] ,_a : Tensor ,_a : Optional[bool] = None ,_a : Optional[bool] = None ):
'''simple docstring'''
_a : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
_a : List[Any] = self.embedder(_a )
_a : Union[str, Any] = self.encoder(
_a ,output_hidden_states=_a ,return_dict=_a )
_a : List[str] = encoder_outputs[0]
_a : str = self.pooler(_a )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_a ,pooler_output=_a ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , lowercase__ , )
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
super().__init__(_a )
_a : Dict = config.num_labels
_a : Any = ResNetModel(_a )
# classification head
_a : List[Any] = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_a ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def __lowercase ( self : Dict ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[torch.LongTensor] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,):
'''simple docstring'''
_a : Any = return_dict if return_dict is not None else self.config.use_return_dict
_a : str = self.resnet(_a ,output_hidden_states=_a ,return_dict=_a )
_a : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
_a : Any = self.classifier(_a )
_a : List[Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_a : Union[str, Any] = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_a : List[str] = 'single_label_classification'
else:
_a : int = 'multi_label_classification'
if self.config.problem_type == "regression":
_a : Tuple = MSELoss()
if self.num_labels == 1:
_a : Optional[int] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
_a : int = loss_fct(_a ,_a )
elif self.config.problem_type == "single_label_classification":
_a : Dict = CrossEntropyLoss()
_a : Dict = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_a : List[str] = BCEWithLogitsLoss()
_a : List[str] = loss_fct(_a ,_a )
if not return_dict:
_a : Any = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_a ,logits=_a ,hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , lowercase__ , )
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] ,_a : Optional[int] ):
'''simple docstring'''
super().__init__(_a )
super()._init_backbone(_a )
_a : List[str] = [config.embedding_size] + config.hidden_sizes
_a : Optional[Any] = ResNetEmbeddings(_a )
_a : Dict = ResNetEncoder(_a )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@replace_return_docstrings(output_type=_a ,config_class=_CONFIG_FOR_DOC )
def __lowercase ( self : Optional[int] ,_a : Tensor ,_a : Optional[bool] = None ,_a : Optional[bool] = None ):
'''simple docstring'''
_a : Any = return_dict if return_dict is not None else self.config.use_return_dict
_a : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : Optional[int] = self.embedder(_a )
_a : Tuple = self.encoder(_a ,output_hidden_states=_a ,return_dict=_a )
_a : Union[str, Any] = outputs.hidden_states
_a : Optional[Any] = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_a : List[str] = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=_a ,hidden_states=outputs.hidden_states if output_hidden_states else None ,attentions=_a ,)
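# A hedged usage sketch of the model implemented above, via the public
# `transformers` API (ResNetConfig/ResNetModel are the library's exported names,
# not the local aliases). A tiny random-weight config keeps the snippet
# self-contained; the sizes below are illustrative.
import torch
from transformers import ResNetConfig, ResNetModel
tiny_config = ResNetConfig(num_channels=3, embedding_size=8, hidden_sizes=[8, 16], depths=[1, 1])
tiny_model = ResNetModel(tiny_config)
with torch.no_grad():
    out = tiny_model(torch.rand(1, 3, 64, 64))
print(out.last_hidden_state.shape, out.pooler_output.shape)  # pooled output is (1, 16, 1, 1)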
| 5 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__lowerCAmelCase = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
__lowerCAmelCase = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def UpperCAmelCase_ (__a : Dict ):
"""simple docstring"""
_a : List[Any] = list(state_dict.keys() )
for name in state_dict_keys:
_a : List[Any] = state_dict.pop(__a )
# emb -> embedding
if name.startswith('emb.' ):
_a : List[str] = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
_a : Dict = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
_a : int = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , __a )
# ffn -> feed_forward
_a : str = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , __a )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
_a : Any = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
_a : int = name.replace('.time_mix_v' , '.time_mix_value' )
    # time_mix_r -> time_mix_receptance and reshape
if name.endswith('.time_mix_r' ):
_a : Tuple = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
_a : Tuple = 'rwkv.' + name
_a : List[Any] = weight
return state_dict
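# A standalone illustration (hypothetical key name) of the rename rules applied
# above: `att` -> `attention`, `time_mix_k` -> `time_mix_key`, plus the `rwkv.`
# prefix added to everything except `head.weight`. Relies on the module-level
# `re` import.
sample_key = "blocks.3.att.time_mix_k"
renamed = re.sub(R"blocks\.(\d+)\.att", R"blocks.\1.attention", sample_key)
renamed = renamed.replace(".time_mix_k", ".time_mix_key")
assert "rwkv." + renamed == "rwkv.blocks.3.attention.time_mix_key"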
def UpperCAmelCase_ (__a : Tuple , __a : Union[str, Any] , __a : List[str] , __a : str=None , __a : List[str]=None , __a : int=False , __a : int=None ):
"""simple docstring"""
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
_a : List[Any] = 5_0_2_7_7
_a : Optional[Any] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
_a : Optional[Any] = PreTrainedTokenizerFast(tokenizer_file=__a )
_a : List[Any] = len(__a )
tokenizer.save_pretrained(__a )
# 2. Build the config
_a : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_a : str = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
_a : str = RwkvConfig(
vocab_size=__a , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__a )
# 3. Download model file then convert state_dict
_a : Tuple = hf_hub_download(__a , __a )
_a : Optional[int] = torch.load(__a , map_location='cpu' )
_a : Dict = convert_state_dict(__a )
# 4. Split in shards and save
_a, _a : List[Any] = shard_checkpoint(__a )
for shard_file, shard in shards.items():
torch.save(__a , os.path.join(__a , __a ) )
if index is not None:
_a : Dict = os.path.join(__a , __a )
# Save the index as well
with open(__a , 'w' , encoding='utf-8' ) as f:
_a : List[Any] = json.dumps(__a , indent=2 , sort_keys=__a ) + '\n'
f.write(__a )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
        'Cleaning up shards. This may error with an OOM error; if this is the case, don\'t worry, you still have converted the model.' )
_a : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_a : Optional[Any] = torch.load(os.path.join(__a , __a ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__a , __a ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
_a : List[str] = AutoModelForCausalLM.from_pretrained(__a )
model.push_to_hub(__a , max_shard_size='2GB' )
tokenizer.push_to_hub(__a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
__lowerCAmelCase = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
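# Hypothetical invocation (the repo id and checkpoint file name are illustrative,
# not guaranteed to exist):
#   python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth --output_dir ./rwkv-hf --size 169M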
| 5 | 1 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__lowerCAmelCase = threading.Lock()
__lowerCAmelCase = None
__lowerCAmelCase = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
__lowerCAmelCase = logging.WARNING
__lowerCAmelCase = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Dict = os.getenv('TRANSFORMERS_VERBOSITY' , __a )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def UpperCAmelCase_ ():
"""simple docstring"""
return __name__.split('.' )[0]
def UpperCAmelCase_ ():
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_a : str = logging.StreamHandler() # Set sys.stderr as stream.
_a : Optional[Any] = sys.stderr.flush
# Apply our default configuration to the library root logger.
_a : List[Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_a : List[str] = False
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
_a : int = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_a : str = None
def UpperCAmelCase_ ():
"""simple docstring"""
return log_levels
def UpperCAmelCase_ (__a : Optional[str] = None ):
"""simple docstring"""
if name is None:
_a : List[Any] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__a )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Union[str, Any] = False
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Dict = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = _get_library_root_logger().handlers
for handler in handlers:
_a : Union[str, Any] = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__a )
def UpperCAmelCase_ (self : Union[str, Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ):
"""simple docstring"""
_a : Union[str, Any] = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , __a )
if no_advisory_warnings:
return
self.warning(*__a , **__a )
__lowerCAmelCase = warning_advice
@functools.lru_cache(__a )
def UpperCAmelCase_ (self : int , *__a : Optional[Any] , **__a : Any ):
"""simple docstring"""
self.warning(*__a , **__a )
__lowerCAmelCase = warning_once
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Any ,*_a : Tuple ,**_a : int ): # pylint: disable=unused-argument
'''simple docstring'''
_a : int = args[0] if args else None
def __iter__( self : str ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[Any] ,_a : int ):
'''simple docstring'''
def empty_fn(*_a : Optional[Any] ,**_a : Any ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : List[str] ):
'''simple docstring'''
return self
def __exit__( self : List[str] ,_a : str ,_a : List[Any] ,_a : str ):
'''simple docstring'''
return
class UpperCAmelCase__ :
"""simple docstring"""
def __call__( self : Union[str, Any] ,*_a : Tuple ,**_a : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*_a ,**_a )
else:
return EmptyTqdm(*_a ,**_a )
def __lowercase ( self : str ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
_a : Any = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_a ,**_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__lowerCAmelCase = _tqdm_cls()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : str = True
hf_hub_utils.enable_progress_bars()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : Dict = False
hf_hub_utils.disable_progress_bars()
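# A hedged usage sketch of the helpers defined above, through the public
# `transformers.utils.logging` module, which exposes them under their real names
# (get_logger, set_verbosity_info, and the patched logger.warning_once).
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
example_logger = hf_logging.get_logger(__name__)
example_logger.info("visible at INFO verbosity")
example_logger.warning_once("emitted a single time thanks to the lru_cache wrapper")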
| 5 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
__lowerCAmelCase = datasets.logging.get_logger(__name__)
__lowerCAmelCase = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'sources': datasets.Value('string' ,id='sequence' ),
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
        'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf',
] ,)
def __lowercase ( self : int ,_a : int ):
'''simple docstring'''
if self.config_name == "default":
_a : List[Any] = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
else:
_a : List[str] = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Dict ,_a : Optional[Any] ,_a : List[str]=None ,_a : Tuple=False ):
'''simple docstring'''
if gpus is None:
_a : str = 1 if torch.cuda.is_available() else 0
_a : Optional[Any] = {'src': sources, 'mt': predictions, 'ref': references}
_a : Optional[Any] = [dict(zip(_a ,_a ) ) for t in zip(*data.values() )]
_a, _a : Tuple = self.scorer.predict(_a ,gpus=_a ,progress_bar=_a )
return {"mean_score": mean_score, "scores": scores}
| 5 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = BlenderbotSmallTokenizer
__UpperCAmelCase : Tuple = False
def __lowercase ( self : List[Any] ):
'''simple docstring'''
super().setUp()
_a : List[str] = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
_a : Tuple = dict(zip(_a ,range(len(_a ) ) ) )
_a : List[Any] = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
_a : List[Any] = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
_a : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_a : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(_a ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(_a ) )
def __lowercase ( self : List[Any] ,**_a : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : Tuple ,_a : int ):
'''simple docstring'''
_a : Optional[Any] = 'adapt act apte'
_a : Dict = 'adapt act apte'
return input_text, output_text
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
_a : Union[str, Any] = 'adapt act apte'
_a : Dict = ['adapt', 'act', 'ap@@', 'te']
_a : Tuple = tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
_a : List[str] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_a : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : str = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
_a : Union[str, Any] = 'I am a small frog.'
_a : int = tok([src_text] ,padding=_a ,truncation=_a )['input_ids']
_a : str = tok.batch_decode(_a ,skip_special_tokens=_a ,clean_up_tokenization_spaces=_a )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
_a : Union[str, Any] = 'I am a small frog .'
_a : Optional[Any] = '.'
_a : Optional[Any] = tok(_a )['input_ids']
_a : Union[str, Any] = tok(_a )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 5 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
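# A hedged illustration of the _LazyModule pattern above: importing the package
# module is cheap, and the concrete class is only materialised on first
# attribute access.
import transformers.models.squeezebert as squeezebert
config_cls = squeezebert.SqueezeBertConfig  # first access triggers the real import
print(config_cls.model_type)  # "squeezebert"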
| 5 | 1 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-config' )
except HTTPError:
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('test-config' ,use_auth_token=self._token )
_a : Optional[Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a ,repo_id='test-config' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Tuple = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token )
_a : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='valid_org/test-config-org' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
_a : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'} )
_a : int = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" ,trust_remote_code=_a )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,'CustomConfig' )
self.assertEqual(new_config.attribute ,42 )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_a : int = c.n_embd + 1 # int
_a : str = c.resid_pdrop + 1.0 # float
_a : Dict = not c.scale_attn_weights # bool
_a : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(_a ,c.n_embd ,'mismatch for key: n_embd' )
self.assertEqual(_a ,c.resid_pdrop ,'mismatch for key: resid_pdrop' )
self.assertEqual(_a ,c.scale_attn_weights ,'mismatch for key: scale_attn_weights' )
self.assertEqual(_a ,c.summary_type ,'mismatch for key: summary_type' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : int = PretrainedConfig()
_a : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_a ,['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_a : Dict = [key for key, value in config_common_kwargs.items() if value == getattr(_a ,_a )]
if len(_a ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F""" {', '.join(_a )}.""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaises(_a ):
# config is in subfolder, the following should not work without specifying the subfolder
_a : List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_a : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ,subfolder='bert' )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[Any] = mock.Mock()
_a : Any = 500
_a : Any = {}
_a : Any = HTTPError
_a : List[Any] = {}
# Download this model to make sure it's in the cache.
_a : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' ,return_value=_a ) as mock_head:
_a : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = AutoConfig.from_pretrained('bert-base-cased' )
_a : List[str] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_a )
_a : str = 2
json.dump(configuration.to_dict() ,open(os.path.join(_a ,'config.4.0.0.json' ) ,'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_a : Tuple = ['config.42.0.0.json']
_a : int = 768
configuration.save_pretrained(_a )
shutil.move(os.path.join(_a ,'config.4.0.0.json' ) ,os.path.join(_a ,'config.42.0.0.json' ) )
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,768 )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
_a : Optional[int] = 'v4.0.0'
_a, _a : Tuple = new_transformers.models.auto.AutoConfig.from_pretrained(
_a ,return_unused_kwargs=_a )
self.assertEqual(new_configuration.hidden_size ,2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_a ,{} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_a : str = 'v3.0.0'
_a : Optional[Any] = old_transformers.models.auto.AutoConfig.from_pretrained(_a )
self.assertEqual(old_configuration.hidden_size ,768 )
| 5 |
'''simple docstring'''
import sys
def UpperCAmelCase_ (__a : List[str] ):
"""simple docstring"""
_a : List[str] = len(__a )
_a : Dict = [[0 for x in range(__a )] for x in range(__a )]
_a : Union[str, Any] = [[0 for x in range(__a )] for x in range(__a )]
for chain_length in range(2 , __a ):
for a in range(1 , n - chain_length + 1 ):
_a : Tuple = a + chain_length - 1
_a : Any = sys.maxsize
for c in range(__a , __a ):
_a : Optional[Any] = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
_a : Dict = cost
_a : Any = c
return matrix, sol
def UpperCAmelCase_ (__a : Tuple , __a : List[str] , __a : Dict ):
"""simple docstring"""
if i == j:
print('A' + str(__a ) , end=' ' )
else:
print('(' , end=' ' )
        print_optimal_solution(__a , __a , optimal_solution[i][j] )
        print_optimal_solution(__a , optimal_solution[i][j] + 1 , __a )
print(')' , end=' ' )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
_a : Any = len(__a )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
_a, _a : Union[str, Any] = matrix_chain_order(__a )
    print('No. of Operations required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(__a , 1 , n - 1 )
if __name__ == "__main__":
main()
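# A worked check of the recurrence above with illustrative dimensions
# A1 (10x20), A2 (20x30), A3 (30x40): the two possible parenthesisations cost
cost_left = 10 * 20 * 30 + 10 * 30 * 40   # (A1 A2) A3 -> 18000
cost_right = 20 * 30 * 40 + 10 * 20 * 40  # A1 (A2 A3) -> 32000
assert min(cost_left, cost_right) == 18000  # the DP table keeps the cheaper split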
| 5 | 1 |
'''simple docstring'''
def UpperCAmelCase_ (__a : list[int] , __a : list[int] ):
"""simple docstring"""
if not len(__a ) == len(__a ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_a, _a, _a : Tuple = equationa
_a, _a, _a : str = equationa
# Calculate the determinants of the matrices
_a : Union[str, Any] = aa * ba - aa * ba
_a : List[Any] = ca * ba - ca * ba
_a : List[Any] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
            # Trivial solution (consistent homogeneous system: x = y = 0)
return (0.0, 0.0)
else:
_a : int = determinant_x / determinant
_a : List[str] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
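# A worked instance of Cramer's rule as implemented above (illustrative system):
# 2x + 3y = 6 and x + 2y = 4 give determinant 2*2 - 3*1 = 1,
# x = (6*2 - 4*3) / 1 = 0 and y = (2*4 - 1*6) / 1 = 2.
a1, b1, c1 = 2, 3, 6
a2, b2, c2 = 1, 2, 4
det = a1 * b2 - a2 * b1
x, y = (c1 * b2 - c2 * b1) / det, (a1 * c2 - a2 * c1) / det
assert (x, y) == (0.0, 2.0)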
| 5 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase_ (__a : Optional[Any] ):
"""simple docstring"""
_a : int = FileLock(str(tmpdir / 'foo.lock' ) )
_a : List[Any] = FileLock(str(tmpdir / 'foo.lock' ) )
_a : Any = 0.01
with locka.acquire():
with pytest.raises(__a ):
_a : int = time.time()
locka.acquire(__a )
assert time.time() - _start > timeout
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Dict = 'a' * 1_0_0_0 + '.lock'
_a : int = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('.lock' )
assert not locka._lock_file.endswith(__a )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
_a : Dict = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__a ):
locka.acquire(0 )
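# A hedged usage sketch of the FileLock API exercised by the tests above: the
# context manager acquires on entry and releases on exit (uses the module's
# existing `os` and `FileLock` imports).
import tempfile
def _filelock_demo() -> None:
    lock_path = os.path.join(tempfile.gettempdir(), 'example.lock' )
    with FileLock(lock_path):
        pass  # critical section: the lock is held here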
| 5 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 5 |
'''simple docstring'''
def UpperCAmelCase_ (__a : int = 1_0**1_2 ):
"""simple docstring"""
_a : List[str] = 1
_a : Optional[int] = 0
_a : Any = 1
_a : List[str] = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
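# A worked trace of the recurrence above (the pairs generated solve the Pell-type
# equation x^2 - 2*y^2 = -1): starting from (numerator, denominator) = (1, 1),
# one iteration yields (7, 5) and a second (41, 29); (29 + 1) // 2 = 15 recovers
# the known small solution of 15 blue discs out of 21, where P(two blue) = 1/2.
_num, _den, _prev_num, _prev_den = 1, 1, 1, 0
for _ in range(2):
    _prev_num += 2 * _num
    _num += 2 * _prev_num
    _prev_den += 2 * _den
    _den += 2 * _prev_den
assert (_num, _den) == (41, 29) and (_den + 1) // 2 == 15
assert 15 * 14 * 2 == 21 * 20  # P(two blue) = 15/21 * 14/20 = 1/2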
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCAmelCase = 1_6
__lowerCAmelCase = 3_2
def get_dataloaders(accelerator : Accelerator , batch_size : int = 1_6 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue' , 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 1_6
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args ):
    """simple docstring"""
    if os.environ.get('TESTING_MOCKED_DATALOADERS' , None ) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    local_sgd_steps = int(args.local_sgd_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue' , 'mrpc' )
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=1_0_0 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        with LocalSGD(
            accelerator=accelerator , model=model , local_sgd_steps=local_sgd_steps , enabled=local_sgd_steps is not None ) as local_sgd:
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model ):
                    output = model(**batch )
                    loss = output.loss
                    accelerator.backward(loss )
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" , eval_metric )
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps' , type=int , default=1 , help='The number of minibatches to be run before gradients are accumulated.' , )
    parser.add_argument(
        '--local_sgd_steps' , type=int , default=8 , help='Number of local SGD steps or None to disable local SGD' )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
    training_function(config , args )
if __name__ == "__main__":
main()
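# Typical invocation of this example (hypothetical script name), after configuring
# accelerate once with `accelerate config`:
#
#   accelerate launch local_sgd.py --gradient_accumulation_steps 2 --local_sgd_steps 8
#
# `accelerate launch` picks up the saved distributed configuration; the flags map
# onto the argparse options defined in main() above.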
| 5 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
__lowerCAmelCase = {"""mobilebert-uncased""": 5_1_2}
__lowerCAmelCase = {}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[Any] = MobileBertTokenizer
def __init__( self : Dict ,_a : List[Any]=None ,_a : Optional[Any]=None ,_a : Union[str, Any]=True ,_a : Dict="[UNK]" ,_a : Union[str, Any]="[SEP]" ,_a : Any="[PAD]" ,_a : Optional[int]="[CLS]" ,_a : Optional[Any]="[MASK]" ,_a : Dict=True ,_a : Any=None ,**_a : Optional[Any] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
_a : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_a ) != do_lower_case
or normalizer_state.get('strip_accents' ,_a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_a ) != tokenize_chinese_chars
):
_a : Optional[Any] = getattr(_a ,normalizer_state.pop('type' ) )
_a : Dict = do_lower_case
_a : str = strip_accents
_a : Tuple = tokenize_chinese_chars
_a : Optional[Any] = normalizer_class(**_a )
_a : str = do_lower_case
def __lowercase ( self : Tuple ,_a : Union[str, Any] ,_a : List[str]=None ):
'''simple docstring'''
_a : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : List[str] = [self.sep_token_id]
_a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
_a : int = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
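# Minimal usage sketch for the fast tokenizer defined above, via the upstream class
# name (the local definition keeps the dataset's renamed identifiers):
#
#   from transformers import MobileBertTokenizerFast
#   tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   tok("hello world")["input_ids"]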
| 5 | 1 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowerCAmelCase = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg , hint=None ):
    """simple docstring"""
    require_version(deps[pkg] , hint )
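# Example: verify a single runtime dependency against the pinned table above
# (the package name must be a key of `deps`):
#
#   dep_version_check("numpy")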
| 5 |
'''simple docstring'''
def UpperCAmelCase_ (input_string : str ):
    """simple docstring"""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ''
    output_string = ''
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string ) )]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string ) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
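    # Sanity check for the helper above: the longest palindromic substring of
    # "abacab" is "bacab" (the "|" separators make odd and even centers uniform).
    assert UpperCAmelCase_('abacab' ) == 'bacab'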
| 5 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Dict = ['''input_ids''', '''attention_mask''']
__UpperCAmelCase : Tuple = None
def __init__( self : Optional[Any] ,_a : List[Any]=None ,_a : Union[str, Any]=None ,_a : str=None ,_a : List[str]="<unk>" ,_a : List[Any]="<s>" ,_a : Optional[Any]="</s>" ,_a : Optional[int]="<pad>" ,_a : Optional[int]=False ,_a : Any=False ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(
_a ,_a ,tokenizer_file=_a ,unk_token=_a ,bos_token=_a ,eos_token=_a ,pad_token=_a ,add_prefix_space=_a ,clean_up_tokenization_spaces=_a ,**_a ,)
_a : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' ,_a ) != add_prefix_space:
_a : List[Any] = getattr(_a ,pre_tok_state.pop('type' ) )
_a : Optional[int] = add_prefix_space
_a : Tuple = pre_tok_class(**_a )
_a : Optional[int] = add_prefix_space
def __lowercase ( self : Tuple ,*_a : List[str] ,**_a : List[Any] ):
'''simple docstring'''
_a : Optional[int] = kwargs.get('is_split_into_words' ,_a )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
' pretokenized inputs.' )
return super()._batch_encode_plus(*_a ,**_a )
def __lowercase ( self : List[str] ,*_a : str ,**_a : Tuple ):
'''simple docstring'''
_a : List[Any] = kwargs.get('is_split_into_words' ,_a )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
' pretokenized inputs.' )
return super()._encode_plus(*_a ,**_a )
def __lowercase ( self : Dict ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
_a : Dict = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
def __lowercase ( self : List[Any] ,_a : "Conversation" ):
'''simple docstring'''
_a : Any = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_a ,add_special_tokens=_a ) + [self.eos_token_id] )
if len(_a ) > self.model_max_length:
_a : Any = input_ids[-self.model_max_length :]
return input_ids
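# Minimal usage sketch via the upstream class name (BloomTokenizerFast), using one
# of the checkpoints from the map above:
#
#   from transformers import BloomTokenizerFast
#   tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   tok("hello world")["input_ids"]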
| 5 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num : int ):
    """simple docstring"""
    if num < 0:
        raise ValueError('Number should not be negative.' )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
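    # Small sanity check for the cached factorial above: 5! == 120.
    assert factorial(5 ) == 1_2_0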
| 5 | 1 |
'''simple docstring'''
from math import factorial
__lowerCAmelCase = {str(digit): factorial(digit) for digit in range(1_0)}
def digit_factorial_sum(number : int ):
    """simple docstring"""
    if not isinstance(number , int ):
        raise TypeError('Parameter number must be int' )
    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0' )
    # Convert the number to a string to iterate over its digits and sum their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
def solution(chain_length : int = 6_0 , number_limit : int = 1_0_0_0_0_0_0 ):
    """simple docstring"""
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError('Parameters chain_length and number_limit must be int' )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0' )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths : dict[int, int] = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item, or
        # the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
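    # Grounding check for digit_factorial_sum: 145 is a factorion, 1! + 4! + 5! == 145.
    assert digit_factorial_sum(1_4_5 ) == 1_4_5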
| 5 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler : Optional[logging.Handler] = None
log_levels = {
    """debug""": logging.DEBUG,
    """info""": logging.INFO,
    """warning""": logging.WARNING,
    """error""": logging.ERROR,
    """critical""": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def UpperCAmelCase_ ():
"""simple docstring"""
    env_level_str = os.getenv('TRANSFORMERS_VERBOSITY' , None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def UpperCAmelCase_ ():
"""simple docstring"""
return __name__.split('.' )[0]
def UpperCAmelCase_ ():
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_a : str = logging.StreamHandler() # Set sys.stderr as stream.
_a : Optional[Any] = sys.stderr.flush
# Apply our default configuration to the library root logger.
_a : List[Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_a : List[str] = False
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
_a : int = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_a : str = None
def UpperCAmelCase_ ():
"""simple docstring"""
return log_levels
def UpperCAmelCase_ (__a : Optional[str] = None ):
"""simple docstring"""
if name is None:
_a : List[Any] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__a )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Union[str, Any] = False
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Dict = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = _get_library_root_logger().handlers
for handler in handlers:
_a : Union[str, Any] = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__a )
def warning_advice(self , *args , **kwargs ):
    """simple docstring"""
    no_advisory_warnings = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None )
def warning_once(self , *args , **kwargs ):
    """simple docstring"""
    self.warning(*args , **kwargs )
logging.Logger.warning_once = warning_once
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Any ,*_a : Tuple ,**_a : int ): # pylint: disable=unused-argument
'''simple docstring'''
_a : int = args[0] if args else None
def __iter__( self : str ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[Any] ,_a : int ):
'''simple docstring'''
def empty_fn(*_a : Optional[Any] ,**_a : Any ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : List[str] ):
'''simple docstring'''
return self
def __exit__( self : List[str] ,_a : str ,_a : List[Any] ,_a : str ):
'''simple docstring'''
return
class UpperCAmelCase__ :
"""simple docstring"""
def __call__( self : Union[str, Any] ,*_a : Tuple ,**_a : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*_a ,**_a )
else:
return EmptyTqdm(*_a ,**_a )
def __lowercase ( self : str ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
_a : Any = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_a ,**_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
    _tqdm_active = True
hf_hub_utils.enable_progress_bars()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
    _tqdm_active = False
hf_hub_utils.disable_progress_bars()
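# The public API this module mirrors is used roughly like this in transformers:
#
#   from transformers.utils import logging
#   logging.set_verbosity_info()
#   logger = logging.get_logger(__name__)
#   logger.warning("ready")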
| 5 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = '''vivit'''
    def __init__( self , image_size=224 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-06 , qkv_bias=True , **kwargs ):
        '''simple docstring'''
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
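# Quick sanity sketch for the config class above (name kept as defined here); the
# defaults mirror google/vivit-b-16x2-kinetics400:
#
#   cfg = UpperCAmelCase__()
#   assert cfg.hidden_size == 768 and cfg.num_frames == 32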
| 5 |
'''simple docstring'''
def UpperCAmelCase_ (equation_a : list[int] , equation_b : list[int] ):
    """simple docstring"""
    if not len(equation_a ) == len(equation_b ) == 3:
        raise ValueError('Please enter a valid equation.' )
    if equation_a[0] == equation_a[1] == equation_b[0] == equation_b[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.' )
    # Extract the coefficients
    aa, ba, ca = equation_a
    ab, bb, cb = equation_b
    # Calculate the determinants of the matrices
    determinant = aa * bb - ab * ba
    determinant_x = ca * bb - cb * ba
    determinant_y = aa * cb - ab * ca
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)' )
        else:
            raise ValueError('No solution. (Inconsistent system)' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
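if __name__ == "__main__":
    # Quick check of the solver above: 0x + 4y = 50 and 2x + 0y = 26 give (13.0, 12.5).
    assert UpperCAmelCase_([0, 4, 50] , [2, 0, 26] ) == (13.0, 12.5)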
| 5 | 1 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = XLMRobertaTokenizer
__UpperCAmelCase : Union[str, Any] = XLMRobertaTokenizerFast
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Optional[int] = True
def __lowercase ( self : Tuple ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_a : Dict = XLMRobertaTokenizer(_a ,keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Union[str, Any] = '<pad>'
_a : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) ,_a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) ,_a )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-1] ,'<mask>' )
self.assertEqual(len(_a ) ,1002 )
def __lowercase ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,1002 )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Union[str, Any] = XLMRobertaTokenizer(_a ,keep_accents=_a )
_a : Optional[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(_a ,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
_a : str = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_a ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
_a : Union[str, Any] = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] ,)
_a : Union[str, Any] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] ,)
def __lowercase ( self : List[str] ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_a : Union[str, Any] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_a : Tuple = self.rust_tokenizer_class.from_pretrained(_a ,**_a )
_a : List[str] = self.tokenizer_class.from_pretrained(_a ,**_a )
_a : Any = tempfile.mkdtemp()
_a : int = tokenizer_r.save_pretrained(_a )
_a : Any = tokenizer_p.save_pretrained(_a )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_a : Optional[Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(_a ,_a )
# Checks everything loads correctly in the same way
_a : Optional[int] = tokenizer_r.from_pretrained(_a )
_a : Tuple = tokenizer_p.from_pretrained(_a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_a ,_a ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_a )
# Save tokenizer rust, legacy_format=True
_a : Union[str, Any] = tempfile.mkdtemp()
_a : Optional[int] = tokenizer_r.save_pretrained(_a ,legacy_format=_a )
_a : str = tokenizer_p.save_pretrained(_a )
                # Checks it saves with the same files
self.assertSequenceEqual(_a ,_a )
# Checks everything loads correctly in the same way
_a : Tuple = tokenizer_r.from_pretrained(_a )
_a : Tuple = tokenizer_p.from_pretrained(_a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_a ,_a ) )
shutil.rmtree(_a )
# Save tokenizer rust, legacy_format=False
_a : Any = tempfile.mkdtemp()
_a : Any = tokenizer_r.save_pretrained(_a ,legacy_format=_a )
_a : Union[str, Any] = tokenizer_p.save_pretrained(_a )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_a : Any = tokenizer_r.from_pretrained(_a )
_a : int = tokenizer_p.from_pretrained(_a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_a ,_a ) )
shutil.rmtree(_a )
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_a ,f.name )
_a : Dict = XLMRobertaTokenizer(f.name ,keep_accents=_a )
_a : Optional[Any] = pickle.dumps(_a )
pickle.loads(_a )
def __lowercase ( self : int ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_a : List[str] = self.get_tokenizer()
_a : Optional[int] = self.get_rust_tokenizer()
_a : str = 'I was born in 92000, and this is falsé.'
_a : List[str] = tokenizer.tokenize(_a )
_a : str = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
_a : Optional[Any] = tokenizer.encode(_a ,add_special_tokens=_a )
_a : Optional[Any] = rust_tokenizer.encode(_a ,add_special_tokens=_a )
self.assertListEqual(_a ,_a )
_a : Optional[Any] = self.get_rust_tokenizer()
_a : Any = tokenizer.encode(_a )
_a : Any = rust_tokenizer.encode(_a )
self.assertListEqual(_a ,_a )
@slow
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[int] = 'Hello World!'
_a : Optional[int] = [0, 3_5378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_a ,self.big_tokenizer.encode(_a ) )
@slow
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[str] = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
_a : List[str] = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_a ,self.big_tokenizer.encode(_a ) )
@slow
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Any = {'input_ids': [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a ,model_name='xlm-roberta-base' ,revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' ,)
| 5 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ,_a : List[str] ,_a : Optional[Any]=13 ,_a : str=30 ,_a : str=2 ,_a : Union[str, Any]=3 ,_a : Optional[Any]=True ,_a : int=True ,_a : Union[str, Any]=32 ,_a : List[Any]=5 ,_a : Union[str, Any]=4 ,_a : int=37 ,_a : Any="gelu" ,_a : Union[str, Any]=0.1 ,_a : str=0.1 ,_a : List[str]=10 ,_a : Dict=0.02 ,_a : Tuple=None ,):
'''simple docstring'''
_a : Any = parent
_a : int = batch_size
_a : List[Any] = image_size
_a : Optional[int] = patch_size
_a : List[str] = num_channels
_a : Dict = is_training
_a : Dict = use_labels
_a : Optional[Any] = hidden_size
_a : str = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Dict = intermediate_size
_a : Union[str, Any] = hidden_act
_a : List[str] = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : List[str] = type_sequence_label_size
_a : int = initializer_range
_a : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a : Union[str, Any] = (image_size // patch_size) ** 2
_a : Tuple = num_patches + 1
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : str = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def __lowercase ( self : Tuple ,_a : Any ,_a : List[Any] ,_a : int ):
'''simple docstring'''
_a : str = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_a : int = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : List[Any] ,_a : str ,_a : Tuple ,_a : Dict ):
'''simple docstring'''
_a : Tuple = self.type_sequence_label_size
_a : int = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = model(_a ,labels=_a )
print('Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
print('Labels: {labels}' )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a : int = 1
_a : Optional[Any] = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = self.prepare_config_and_inputs()
_a, _a, _a : int = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[str] = ViTMSNModelTester(self )
_a : Optional[int] = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_a )
_a : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : List[Any] = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self : int ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img ():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(2 )
_a : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_a )
_a : List[str] = self.default_image_processor
_a : int = prepare_img()
_a : Tuple = image_processor(images=_a ,return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[int] = model(**_a )
# verify the logits
_a : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_a )
_a : List[Any] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1E-4 ) )
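# The @slow integration test above follows the usual transformers convention and is
# skipped by default; it is typically enabled with the RUN_SLOW flag, e.g.
# (hypothetical test path):
#
#   RUN_SLOW=1 pytest tests/models/vit_msn/test_modeling_vit_msn.py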
| 5 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : torch.FloatTensor
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple ,_a : List[str]=3 ,_a : Optional[int]=3 ,_a : List[Any]=("DownEncoderBlock2D",) ,_a : Tuple=(64,) ,_a : Dict=2 ,_a : Optional[int]=32 ,_a : Tuple="silu" ,_a : Tuple=True ,):
'''simple docstring'''
super().__init__()
_a : int = layers_per_block
_a : List[str] = torch.nn.Convad(
_a ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
_a : Union[str, Any] = None
_a : List[Any] = nn.ModuleList([] )
# down
_a : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(_a ):
_a : Union[str, Any] = output_channel
_a : int = block_out_channels[i]
_a : int = i == len(_a ) - 1
_a : List[str] = get_down_block(
_a ,num_layers=self.layers_per_block ,in_channels=_a ,out_channels=_a ,add_downsample=not is_final_block ,resnet_eps=1E-6 ,downsample_padding=0 ,resnet_act_fn=_a ,resnet_groups=_a ,attention_head_dim=_a ,temb_channels=_a ,)
self.down_blocks.append(_a )
# mid
_a : Optional[int] = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1E-6 ,resnet_act_fn=_a ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=_a ,temb_channels=_a ,)
# out
_a : Optional[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=_a ,eps=1E-6 )
_a : Dict = nn.SiLU()
_a : List[Any] = 2 * out_channels if double_z else out_channels
_a : Optional[Any] = nn.Convad(block_out_channels[-1] ,_a ,3 ,padding=1 )
_a : List[Any] = False
def __lowercase ( self : Tuple ,_a : Optional[Any] ):
'''simple docstring'''
_a : str = x
_a : str = self.conv_in(_a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(_a : str ):
def custom_forward(*_a : str ):
return module(*_a )
return custom_forward
# down
if is_torch_version('>=' ,'1.11.0' ):
for down_block in self.down_blocks:
_a : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(_a ) ,_a ,use_reentrant=_a )
# middle
_a : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,_a ,use_reentrant=_a )
else:
for down_block in self.down_blocks:
_a : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) ,_a )
# middle
_a : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,_a )
else:
# down
for down_block in self.down_blocks:
_a : int = down_block(_a )
# middle
_a : List[str] = self.mid_block(_a )
# post-process
_a : List[Any] = self.conv_norm_out(_a )
_a : str = self.conv_act(_a )
_a : Any = self.conv_out(_a )
return sample
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] ,_a : Dict=3 ,_a : Optional[int]=3 ,_a : List[Any]=("UpDecoderBlock2D",) ,_a : Union[str, Any]=(64,) ,_a : Union[str, Any]=2 ,_a : List[str]=32 ,_a : Optional[int]="silu" ,_a : Optional[Any]="group" ,):
'''simple docstring'''
super().__init__()
_a : Optional[Any] = layers_per_block
_a : Union[str, Any] = nn.Convad(
_a ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
_a : List[str] = None
_a : Union[str, Any] = nn.ModuleList([] )
_a : str = in_channels if norm_type == 'spatial' else None
# mid
_a : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1E-6 ,resnet_act_fn=_a ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=_a ,temb_channels=_a ,)
# up
_a : str = list(reversed(_a ) )
_a : Tuple = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_a ):
_a : Optional[Any] = output_channel
_a : Any = reversed_block_out_channels[i]
_a : str = i == len(_a ) - 1
_a : Optional[int] = get_up_block(
_a ,num_layers=self.layers_per_block + 1 ,in_channels=_a ,out_channels=_a ,prev_output_channel=_a ,add_upsample=not is_final_block ,resnet_eps=1E-6 ,resnet_act_fn=_a ,resnet_groups=_a ,attention_head_dim=_a ,temb_channels=_a ,resnet_time_scale_shift=_a ,)
self.up_blocks.append(_a )
_a : Dict = output_channel
# out
if norm_type == "spatial":
_a : str = SpatialNorm(block_out_channels[0] ,_a )
else:
_a : List[str] = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=_a ,eps=1E-6 )
_a : Optional[Any] = nn.SiLU()
_a : List[Any] = nn.Convad(block_out_channels[0] ,_a ,3 ,padding=1 )
_a : List[Any] = False
def __lowercase ( self : Tuple ,_a : Union[str, Any] ,_a : List[str]=None ):
'''simple docstring'''
_a : List[Any] = z
_a : List[str] = self.conv_in(_a )
_a : List[str] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(_a : Optional[Any] ):
def custom_forward(*_a : int ):
return module(*_a )
return custom_forward
if is_torch_version('>=' ,'1.11.0' ):
# middle
_a : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,_a ,_a ,use_reentrant=_a )
_a : str = sample.to(_a )
# up
for up_block in self.up_blocks:
_a : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(_a ) ,_a ,_a ,use_reentrant=_a )
else:
# middle
_a : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,_a ,_a )
_a : Optional[int] = sample.to(_a )
# up
for up_block in self.up_blocks:
_a : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) ,_a ,_a )
else:
# middle
_a : Tuple = self.mid_block(_a ,_a )
_a : Optional[Any] = sample.to(_a )
# up
for up_block in self.up_blocks:
_a : Any = up_block(_a ,_a )
# post-process
if latent_embeds is None:
_a : List[Any] = self.conv_norm_out(_a )
else:
_a : Any = self.conv_norm_out(_a ,_a )
_a : Any = self.conv_act(_a )
_a : Tuple = self.conv_out(_a )
return sample
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict ,_a : Optional[Any] ,_a : Tuple ,_a : List[str] ,_a : int=None ,_a : List[str]="random" ,_a : List[Any]=False ,_a : str=True ):
'''simple docstring'''
super().__init__()
_a : str = n_e
_a : int = vq_embed_dim
_a : int = beta
_a : Any = legacy
_a : str = nn.Embedding(self.n_e ,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )
_a : int = remap
if self.remap is not None:
self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) )
_a : List[Any] = self.used.shape[0]
_a : int = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_a : Dict = self.re_embed
_a : int = self.re_embed + 1
print(
F"""Remapping {self.n_e} indices to {self.re_embed} indices. """
F"""Using {self.unknown_index} for unknown indices.""" )
else:
_a : Dict = n_e
_a : int = sane_index_shape
def __lowercase ( self : Optional[Any] ,_a : Dict ):
'''simple docstring'''
_a : List[Any] = inds.shape
assert len(_a ) > 1
_a : int = inds.reshape(ishape[0] ,-1 )
_a : str = self.used.to(_a )
_a : Any = (inds[:, :, None] == used[None, None, ...]).long()
_a : Tuple = match.argmax(-1 )
_a : List[Any] = match.sum(2 ) < 1
if self.unknown_index == "random":
_a : Optional[Any] = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
else:
_a : Union[str, Any] = self.unknown_index
return new.reshape(_a )
def __lowercase ( self : List[Any] ,_a : Dict ):
'''simple docstring'''
_a : Union[str, Any] = inds.shape
assert len(_a ) > 1
_a : str = inds.reshape(ishape[0] ,-1 )
_a : Optional[int] = self.used.to(_a )
if self.re_embed > self.used.shape[0]: # extra token
_a : Tuple = 0 # simply set to zero
_a : Tuple = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,_a )
return back.reshape(_a )
def __lowercase ( self : Optional[int] ,_a : str ):
'''simple docstring'''
_a : Any = z.permute(0 ,2 ,3 ,1 ).contiguous()
_a : Dict = z.view(-1 ,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_a : str = torch.argmin(torch.cdist(_a ,self.embedding.weight ) ,dim=1 )
_a : List[str] = self.embedding(_a ).view(z.shape )
_a : Dict = None
_a : List[Any] = None
# compute loss for embedding
if not self.legacy:
_a : Optional[int] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_a : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_a : Union[str, Any] = z + (z_q - z).detach()
# reshape back to match original input shape
_a : str = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
if self.remap is not None:
_a : int = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis
_a : Any = self.remap_to_used(_a )
_a : List[str] = min_encoding_indices.reshape(-1 ,1 ) # flatten
if self.sane_index_shape:
_a : Tuple = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __lowercase ( self : str ,_a : Optional[int] ,_a : Tuple ):
'''simple docstring'''
if self.remap is not None:
_a : Dict = indices.reshape(shape[0] ,-1 ) # add batch axis
_a : int = self.unmap_to_all(_a )
_a : str = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_a : Dict = self.embedding(_a )
if shape is not None:
_a : List[Any] = z_q.view(_a )
# reshape back to match original input shape
_a : int = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
return z_q
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[int] ,_a : Optional[Any] ,_a : List[str]=False ):
'''simple docstring'''
_a : Dict = parameters
_a, _a : Union[str, Any] = torch.chunk(_a ,2 ,dim=1 )
_a : List[str] = torch.clamp(self.logvar ,-30.0 ,20.0 )
_a : Any = deterministic
_a : Any = torch.exp(0.5 * self.logvar )
_a : Union[str, Any] = torch.exp(self.logvar )
if self.deterministic:
_a : List[str] = torch.zeros_like(
self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )
def __lowercase ( self : Optional[int] ,_a : Optional[torch.Generator] = None ):
'''simple docstring'''
_a : List[str] = randn_tensor(
self.mean.shape ,generator=_a ,device=self.parameters.device ,dtype=self.parameters.dtype )
_a : Tuple = self.mean + self.std * sample
return x
def __lowercase ( self : Optional[int] ,_a : List[str]=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean ,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar ,dim=[1, 2, 3] ,)
def __lowercase ( self : Any ,_a : Tuple ,_a : Dict=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
_a : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=_a )
def __lowercase ( self : int ):
'''simple docstring'''
return self.mean
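# DiagonalGaussianDistribution.sample() above is the standard reparameterization
# trick: x = mean + std * eps with eps ~ N(0, I), which keeps the draw
# differentiable w.r.t. the predicted mean/logvar. A standalone sketch
# (hypothetical shapes):
#
#   mean = torch.zeros(2, 4)
#   logvar = torch.zeros(2, 4)
#   x = mean + torch.exp(0.5 * logvar) * torch.randn_like(mean)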
| 5 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats(url : str = "https://www.worldometers.info/coronavirus" ):
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url ).text , 'html.parser' )
    keys = soup.findAll('h1' )
    values = soup.findAll('div' , {'class': 'maincounter-number'} )
    keys += soup.findAll('span' , {'class': 'panel-title'} )
    values += soup.findAll('div' , {'class': 'number-table-main'} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 1 |
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = """."""
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = """\n""".join(non_existent_paths)
        raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
    if all_paths != sorted(all_paths):
        raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 5 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase = """docs/source/en/_toctree.yml"""
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Any = defaultdict(__a )
for doc in model_doc:
counts[doc["local"]] += 1
_a : List[str] = [key for key, value in counts.items() if value > 1]
_a : str = []
for duplicate_key in duplicates:
_a : Union[str, Any] = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(__a , key=lambda __a : s["title"].lower() )
def UpperCAmelCase_ (__a : Optional[int]=False ):
"""simple docstring"""
with open(__a , encoding='utf-8' ) as f:
_a : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
_a : Union[str, Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_a : Union[str, Any] = content[api_idx]['sections']
# Then to the model doc
_a : List[str] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
_a : List[str] = api_doc[model_idx]['sections']
_a : List[Any] = [(idx, section) for idx, section in enumerate(__a ) if 'sections' in section]
_a : Tuple = False
for idx, modality_doc in modalities_docs:
_a : List[Any] = modality_doc['sections']
_a : Any = clean_model_doc_toc(__a )
if old_modality_doc != new_modality_doc:
_a : Union[str, Any] = True
if overwrite:
_a : str = new_modality_doc
if diff:
if overwrite:
_a : Dict = model_doc
_a : Dict = api_doc
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
'The model doc part of the table of contents is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__lowerCAmelCase = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
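# A minimal, self-contained sketch of the dedup-and-sort pattern used above,
# on made-up toc entries (not real documentation data): count `local` keys,
# keep one entry per key, then sort case-insensitively by title.
from collections import defaultdict
toc = [{"local": "bert", "title": "BERT"}, {"local": "albert", "title": "ALBERT"}, {"local": "bert", "title": "BERT"}]
counts = defaultdict(int)
for entry in toc:
    counts[entry["local"]] += 1
deduped = [next(e for e in toc if e["local"] == key) for key in counts]
print(sorted(deduped, key=lambda e: e["title"].lower()))  # ALBERT sorts before BERT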
| 5 | 1 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__lowerCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowerCAmelCase = 1_2_8_0_2_2
__lowerCAmelCase = 1_2_8_0_2_8
@require_sentencepiece
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = MaMaaaTokenizer
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : List[Any] = True
def __lowercase ( self : str ):
'''simple docstring'''
super().setUp()
_a : List[str] = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
_a : List[str] = dict(zip(_a ,range(len(_a ) ) ) )
_a : Union[str, Any] = Path(self.tmpdirname )
save_json(_a ,save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_a ,save_dir / VOCAB_FILES_NAMES['spm_file'] )
_a : List[Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self : Any ,**_a : Optional[int] ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : Tuple ,_a : int ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[int] = '</s>'
_a : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) ,_a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) ,_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : str = self.get_tokenizer()
_a : Optional[Any] = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'</s>' )
self.assertEqual(vocab_keys[1] ,'<unk>' )
self.assertEqual(vocab_keys[-1] ,'<s>' )
self.assertEqual(len(_a ) ,tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('Skip this test while all models are still to be uploaded.' )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Tuple = self.get_tokenizer()
_a : Tuple = tokenizer.tokenize('This is a test' )
self.assertListEqual(_a ,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) ,[2, 3, 4, 5, 6] ,)
_a : Union[str, Any] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(_a ,['▁This', '▁is', '▁a', '▁t', 'est'] )
_a : Tuple = tokenizer.convert_tokens_to_string(_a )
self.assertEqual(_a ,'This is a test' )
@slow
def __lowercase ( self : Tuple ):
'''simple docstring'''
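# fmt: off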
_a : Optional[Any] = {'input_ids': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a ,model_name='facebook/m2m100_418M' ,revision='c168bae485c864188cf9aa0e4108b0b6934dc91e' ,)
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = '''facebook/m2m100_418M'''
__UpperCAmelCase : List[str] = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
__UpperCAmelCase : List[Any] = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
__UpperCAmelCase : Dict = [EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2]
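# fmt: on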
@classmethod
def __lowercase ( cls : Dict ):
'''simple docstring'''
_a : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name ,src_lang='en' ,tgt_lang='fr' )
_a : Union[str, Any] = 1
return cls
def __lowercase ( self : int ):
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id('ar' ) ,12_8006 )
self.assertEqual(self.tokenizer.get_lang_id('en' ) ,12_8022 )
self.assertEqual(self.tokenizer.get_lang_id('ro' ) ,12_8076 )
self.assertEqual(self.tokenizer.get_lang_id('mr' ) ,12_8063 )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Dict = self.tokenizer.get_vocab()
self.assertEqual(len(_a ) ,self.tokenizer.vocab_size )
self.assertEqual(vocab['<unk>'] ,3 )
self.assertIn(self.tokenizer.get_lang_token('en' ) ,_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : List[Any] = 'en'
_a : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,_a )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
self.assertIn(_a ,self.tokenizer.all_special_ids )
# fmt: off
_a : List[Any] = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
# fmt: on
_a : Dict = self.tokenizer.decode(_a ,skip_special_tokens=_a )
_a : Dict = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
self.assertNotIn(self.tokenizer.eos_token ,_a )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[Any] = tempfile.mkdtemp()
_a : Any = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_a )
_a : Optional[Any] = MaMaaaTokenizer.from_pretrained(_a )
self.assertDictEqual(new_tok.lang_token_to_id ,_a )
@require_torch
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Dict = 'en'
_a : int = 'fr'
_a : Any = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=_a ,return_tensors='pt' )
_a : Optional[int] = shift_tokens_right(
batch['labels'] ,self.tokenizer.pad_token_id ,self.tokenizer.eos_token_id )
for k in batch:
_a : Any = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Tuple = 'mr'
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
_a : Union[str, Any] = 'zh'
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
@require_torch
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : List[str] = 'mr'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_a : Optional[Any] = 'zh'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Union[str, Any] = self.tokenizer._build_translation_inputs('A test' ,return_tensors='pt' ,src_lang='en' ,tgt_lang='ar' )
self.assertEqual(
nested_simplify(_a ) ,{
# en_XX, A, test, EOS
'input_ids': [[12_8022, 58, 4183, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 12_8006,
} ,)
| 5 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) != 3_2:
raise ValueError('Input must be of length 32' )
_a : Any = b''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '08x' )[-8:]
_a : str = b''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : List[Any] = b''
for char in message:
bit_string += format(__a , '08b' ).encode('utf-8' )
_a : int = format(len(__a ) , '064b' ).encode('utf-8' )
# Pad bit_string so that, once the 64-bit length is appended, it is a multiple of 512 chars
bit_string += b"1"
while len(__a ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) % 5_1_2 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(__a ) , 5_1_2 ):
_a : List[Any] = bit_string[pos : pos + 5_1_2]
_a : str = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '032b' )
_a : int = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__a , 2 )
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
return (a + b) % 2**3_2
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : str = preprocess(__a )
_a : Optional[int] = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
_a : int = 0x67_45_23_01
_a : Union[str, Any] = 0xEF_CD_AB_89
_a : str = 0x98_BA_DC_FE
_a : List[Any] = 0x10_32_54_76
_a : Optional[int] = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__a ):
_a : Union[str, Any] = aa
_a : List[Any] = ba
_a : List[Any] = ca
_a : Dict = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a : Optional[int] = d ^ (b & (c ^ d))
_a : Optional[Any] = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a : Optional[Any] = c ^ (d & (b ^ c))
_a : Dict = (5 * i + 1) % 1_6
elif i <= 4_7:
_a : Optional[Any] = b ^ c ^ d
_a : Dict = (3 * i + 5) % 1_6
else:
_a : int = c ^ (b | not_aa(__a ))
_a : List[str] = (7 * i) % 1_6
_a : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**3_2
_a : Union[str, Any] = d
_a : Tuple = c
_a : Optional[int] = b
_a : Union[str, Any] = sum_aa(__a , left_rotate_aa(__a , shift_amounts[i] ) )
# Add hashed chunk to running total
_a : Any = sum_aa(__a , __a )
_a : Dict = sum_aa(__a , __a )
_a : Union[str, Any] = sum_aa(__a , __a )
_a : str = sum_aa(__a , __a )
_a : Optional[Any] = reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
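# A quick sanity check, independent of the implementation above: any correct
# MD5 must agree with the standard library, e.g. on the empty message.
import hashlib
assert hashlib.md5(b"").hexdigest() == "d41d8cd98f00b204e9800998ecf8427e"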
| 5 | 1 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
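# A self-contained sketch of what @lru_cache buys here: each distinct argument
# is computed once, so a repeated call is a pure cache hit instead of a new
# recursion. The function name below is hypothetical.
from functools import lru_cache
@lru_cache
def fact(num: int) -> int:
    return 1 if num in (0, 1) else num * fact(num - 1)
fact(10)
fact(10)
print(fact.cache_info())  # the second fact(10) call registers as a cache hit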
| 5 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCAmelCase_ (__a : str , __a : Dict=0.999 , __a : List[str]="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__a : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__a : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_a : Tuple = []
for i in range(__a ):
_a : Union[str, Any] = i / num_diffusion_timesteps
_a : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__a ) / alpha_bar_fn(__a ) , __a ) )
return torch.tensor(__a , dtype=torch.floataa )
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Dict = 2
@register_to_config
def __init__( self : str ,_a : int = 1000 ,_a : float = 0.0_0085 ,_a : float = 0.012 ,_a : str = "linear" ,_a : Optional[Union[np.ndarray, List[float]]] = None ,_a : str = "epsilon" ,_a : Optional[bool] = False ,_a : Optional[bool] = False ,_a : float = 1.0 ,_a : str = "linspace" ,_a : int = 0 ,):
'''simple docstring'''
if trained_betas is not None:
_a : List[str] = torch.tensor(_a ,dtype=torch.floataa )
elif beta_schedule == "linear":
_a : Tuple = torch.linspace(_a ,_a ,_a ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a : List[str] = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_a ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a : Dict = betas_for_alpha_bar(_a ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
_a : Tuple = betas_for_alpha_bar(_a ,alpha_transform_type='exp' )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
_a : Optional[Any] = 1.0 - self.betas
_a : Optional[int] = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(_a ,_a ,_a )
_a : Optional[int] = use_karras_sigmas
def __lowercase ( self : Any ,_a : Union[str, Any] ,_a : Optional[Any]=None ):
'''simple docstring'''
if schedule_timesteps is None:
_a : List[Any] = self.timesteps
_a : Dict = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a : int = 1 if len(_a ) > 1 else 0
else:
_a : str = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
_a : str = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Union[float, torch.FloatTensor] ,):
'''simple docstring'''
_a : List[Any] = self.index_for_timestep(_a )
_a : Tuple = self.sigmas[step_index]
_a : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowercase ( self : Any ,_a : int ,_a : Union[str, torch.device] = None ,_a : Optional[int] = None ,):
'''simple docstring'''
_a : Optional[Any] = num_inference_steps
_a : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a : Optional[Any] = np.linspace(0 ,num_train_timesteps - 1 ,_a ,dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : int = (np.arange(0 ,_a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a : Any = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : Union[str, Any] = (np.arange(_a ,0 ,-step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_a : Tuple = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a : Union[str, Any] = np.log(_a )
_a : str = np.interp(_a ,np.arange(0 ,len(_a ) ) ,_a )
if self.config.use_karras_sigmas:
_a : List[Any] = self._convert_to_karras(in_sigmas=_a ,num_inference_steps=self.num_inference_steps )
_a : Dict = np.array([self._sigma_to_t(_a ,_a ) for sigma in sigmas] )
_a : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a : Union[str, Any] = torch.from_numpy(_a ).to(device=_a )
_a : Any = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_a : List[Any] = torch.from_numpy(_a )
_a : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_a ).startswith('mps' ):
# mps does not support float64
_a : Tuple = timesteps.to(_a ,dtype=torch.floataa )
else:
_a : Dict = timesteps.to(device=_a )
# empty dt and derivative
_a : Tuple = None
_a : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a : Union[str, Any] = defaultdict(_a )
def __lowercase ( self : str ,_a : Dict ,_a : Dict ):
'''simple docstring'''
_a : Optional[int] = np.log(_a )
# get distribution
_a : Union[str, Any] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_a : List[Any] = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_a : Tuple = low_idx + 1
_a : Union[str, Any] = log_sigmas[low_idx]
_a : Optional[Any] = log_sigmas[high_idx]
# interpolate sigmas
_a : Optional[Any] = (low - log_sigma) / (low - high)
_a : List[str] = np.clip(_a ,0 ,1 )
# transform interpolation to time range
_a : Union[str, Any] = (1 - w) * low_idx + w * high_idx
_a : List[str] = t.reshape(sigma.shape )
return t
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Tuple ):
'''simple docstring'''
_a : float = in_sigmas[-1].item()
_a : float = in_sigmas[0].item()
_a : Tuple = 7.0 # 7.0 is the value used in the paper
_a : str = np.linspace(0 ,1 ,_a )
_a : Optional[Any] = sigma_min ** (1 / rho)
_a : Union[str, Any] = sigma_max ** (1 / rho)
_a : str = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return self.dt is None
def __lowercase ( self : int ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : Union[float, torch.FloatTensor] ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : bool = True ,):
'''simple docstring'''
_a : Union[str, Any] = self.index_for_timestep(_a )
# advance index counter by 1
_a : Any = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a : Tuple = self.sigmas[step_index]
_a : int = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_a : List[str] = self.sigmas[step_index - 1]
_a : List[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyway.
# We could support gamma in the future, but we would then need to scale the
# timestep before passing it to the model, which requires an API change.
_a : Optional[int] = 0
_a : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a : Dict = sigma_hat if self.state_in_first_order else sigma_next
_a : Optional[int] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a : List[Any] = sigma_hat if self.state_in_first_order else sigma_next
_a : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_a : Union[str, Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
_a : Optional[int] = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a : Optional[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a : Any = sigma_next - sigma_hat
# store for 2nd order step
_a : int = derivative
_a : List[str] = dt
_a : Union[str, Any] = sample
else:
# 2. 2nd order / Heun's method
_a : Dict = (sample - pred_original_sample) / sigma_next
_a : Tuple = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_a : Optional[Any] = self.dt
_a : Union[str, Any] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_a : List[Any] = None
_a : Union[str, Any] = None
_a : Dict = None
_a : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __lowercase ( self : Optional[int] ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,):
'''simple docstring'''
_a : str = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
_a : Dict = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_a : Optional[Any] = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_a : int = self.timesteps.to(original_samples.device )
_a : Optional[Any] = timesteps.to(original_samples.device )
_a : Any = [self.index_for_timestep(_a ,_a ) for t in timesteps]
_a : Optional[int] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a : Optional[Any] = sigma.unsqueeze(-1 )
_a : Any = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
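# A self-contained sketch of the Karras et al. (2022) schedule implemented by
# _convert_to_karras above: sigma_i = (sigma_max^(1/rho)
#   + i/(n-1) * (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho with rho = 7,
# which spaces noise levels densely near sigma_min.
import numpy as np
def karras_sigmas(sigma_min: float, sigma_max: float, n: int, rho: float = 7.0) -> np.ndarray:
    ramp = np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
print(karras_sigmas(0.1, 10.0, 5))  # decreases from 10.0 down to 0.1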
| 5 | 1 |
'''simple docstring'''
def UpperCAmelCase_ (__a : Dict ):
"""simple docstring"""
_a : Optional[Any] = 1
_a : Optional[Any] = 2
while i * i <= n:
_a : Dict = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = 1
_a : Dict = 1
while True:
i += 1
t_num += i
if count_divisors(__a ) > 5_0_0:
break
return t_num
if __name__ == "__main__":
print(solution())
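# A worked example of the divisor-counting idea above: from the factorisation
# n = p1^a1 * ... * pk^ak the divisor count is (a1+1)*...*(ak+1); for instance
# 28 = 2^2 * 7 has (2+1)*(1+1) = 6 divisors: 1, 2, 4, 7, 14, 28.
assert len([d for d in range(1, 29) if 28 % d == 0]) == 6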
| 5 |
'''simple docstring'''
import qiskit
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
_a : Any = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_a : List[Any] = qiskit.QuantumCircuit(__a , __a )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
_a : Tuple = qiskit.execute(__a , __a , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(__a )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 5 | 1 |
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
__lowerCAmelCase = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
__lowerCAmelCase = """|""".join(sys.argv[1:])
__lowerCAmelCase = re.compile(rf'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 5 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_a : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_a : List[str] = 'xvjiarui/stable-diffusion-2-inpainting'
_a, _a : str = FlaxStableDiffusionInpaintPipeline.from_pretrained(_a ,safety_checker=_a )
_a : str = 'Face of a yellow cat, high resolution, sitting on a park bench'
_a : int = jax.random.PRNGKey(0 )
_a : Tuple = 50
_a : Any = jax.device_count()
_a : Dict = num_samples * [prompt]
_a : Optional[Any] = num_samples * [init_image]
_a : str = num_samples * [mask_image]
_a, _a, _a : Optional[Any] = pipeline.prepare_inputs(_a ,_a ,_a )
# shard inputs and rng
_a : Optional[Any] = replicate(_a )
_a : str = jax.random.split(_a ,jax.device_count() )
_a : Dict = shard(_a )
_a : int = shard(_a )
_a : int = shard(_a )
_a : Union[str, Any] = pipeline(
_a ,_a ,_a ,_a ,_a ,_a ,jit=_a )
_a : Union[str, Any] = output.images.reshape(_a ,512 ,512 ,3 )
_a : Union[str, Any] = images[0, 253:256, 253:256, -1]
_a : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_a : Union[str, Any] = jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
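# A minimal sketch of the replicate/shard pattern above, assuming jax and flax
# are installed; `shard` splits the leading batch axis evenly across the local
# devices, which is the layout the jitted pipeline call expects.
import jax
import jax.numpy as jnp
from flax.training.common_utils import shard
n = jax.local_device_count()
batch = jnp.zeros((n * 2, 4))
sharded = shard(batch)
assert sharded.shape == (n, 2, 4)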
| 5 | 1 |
'''simple docstring'''
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Dict ,_a : list ):
'''simple docstring'''
_a : Tuple = set_counts
_a : Union[str, Any] = max(_a )
_a : Optional[int] = len(_a )
_a : Union[str, Any] = [1] * num_sets
_a : List[str] = list(range(_a ) )
def __lowercase ( self : Any ,_a : int ,_a : int ):
'''simple docstring'''
_a : Tuple = self.get_parent(_a )
_a : Any = self.get_parent(_a )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
_a : Any = 0
_a : List[str] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
_a : Optional[int] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
_a : Tuple = 0
_a : Union[str, Any] = src_parent
_a : List[Any] = self.set_counts[src_parent]
_a : List[str] = max(self.max_set ,_a )
return True
def __lowercase ( self : List[str] ,_a : int ):
'''simple docstring'''
if self.parents[disj_set] == disj_set:
return disj_set
_a : Any = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
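# A minimal, self-contained sketch of the union-find idea above (hypothetical
# names, no rank or size bookkeeping): after a merge, both elements resolve to
# the same root.
parent = list(range(4))
def find(x: int) -> int:
    while parent[x] != x:
        x = parent[x]
    return x
def union(a: int, b: int) -> None:
    parent[find(a)] = find(b)
union(0, 1)
assert find(0) == find(1)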
| 5 |
'''simple docstring'''
def UpperCAmelCase_ (__a : str , __a : str ):
"""simple docstring"""
_a : int = len(__a ) + 1
_a : List[str] = len(__a ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_a : Optional[int] = [[0 for i in range(__a )] for j in range(__a )]
# since string of zero length match pattern of zero length
_a : str = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , __a ):
_a : Optional[Any] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , __a ):
_a : Dict = dp[0][j - 2] if pattern[j - 1] == '*' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , __a ):
for j in range(1 , __a ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_a : Tuple = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
_a : List[str] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_a : int = dp[i - 1][j]
else:
_a : Any = 0
else:
_a : Optional[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__lowerCAmelCase = """aab"""
__lowerCAmelCase = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
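# A cross-check against the standard library: Python's `re` gives the same
# '.'/'*' semantics when the match is fully anchored, so it can validate the
# DP matcher on classic cases.
import re
assert re.fullmatch("c*a*b", "aab")
assert not re.fullmatch("mis*is*p*.", "mississippi")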
| 5 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__lowerCAmelCase = None
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
__lowerCAmelCase = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
__lowerCAmelCase = """▁"""
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : int = ['''input_ids''', '''attention_mask''']
__UpperCAmelCase : str = BarthezTokenizer
def __init__( self : int ,_a : Dict=None ,_a : Tuple=None ,_a : List[Any]="<s>" ,_a : Tuple="</s>" ,_a : List[Any]="</s>" ,_a : List[Any]="<s>" ,_a : Tuple="<unk>" ,_a : Optional[Any]="<pad>" ,_a : Any="<mask>" ,**_a : int ,):
'''simple docstring'''
_a : str = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
super().__init__(
_a ,tokenizer_file=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,**_a ,)
_a : int = vocab_file
_a : str = False if not self.vocab_file else True
def __lowercase ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Union[str, Any] = [self.cls_token_id]
_a : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : Any = [self.sep_token_id]
_a : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowercase ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : List[Any] = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file ,_a )
return (out_vocab_file,)
| 5 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = BlenderbotSmallTokenizer
__UpperCAmelCase : Tuple = False
def __lowercase ( self : List[Any] ):
'''simple docstring'''
super().setUp()
_a : List[str] = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
_a : Tuple = dict(zip(_a ,range(len(_a ) ) ) )
_a : List[Any] = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
_a : List[Any] = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
_a : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_a : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(_a ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(_a ) )
def __lowercase ( self : List[Any] ,**_a : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : Tuple ,_a : int ):
'''simple docstring'''
_a : Optional[Any] = 'adapt act apte'
_a : Dict = 'adapt act apte'
return input_text, output_text
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
_a : Union[str, Any] = 'adapt act apte'
_a : Dict = ['adapt', 'act', 'ap@@', 'te']
_a : Tuple = tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
_a : List[str] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_a : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : str = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
_a : Union[str, Any] = 'I am a small frog.'
_a : int = tok([src_text] ,padding=_a ,truncation=_a )['input_ids']
_a : str = tok.batch_decode(_a ,skip_special_tokens=_a ,clean_up_tokenization_spaces=_a )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
_a : Union[str, Any] = 'I am a small frog .'
_a : Optional[Any] = '.'
_a : Optional[Any] = tok(_a )['input_ids']
_a : Union[str, Any] = tok(_a )['input_ids']
assert encoded[-1] == encoded_dot[0]
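# A toy, self-contained illustration of the BPE merges table used in setUp
# above: starting from characters, adjacent pairs are merged in the order of a
# (made-up) ranked merge list, mirroring entries like 'a p' and 'ap t</w>'.
merges = [("a", "p"), ("ap", "t")]
tokens = list("apt")
for left, right in merges:
    i = 0
    while i < len(tokens) - 1:
        if tokens[i] == left and tokens[i + 1] == right:
            tokens[i : i + 2] = [left + right]
        else:
            i += 1
print(tokens)  # ['apt']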
| 5 | 1 |
'''simple docstring'''
import math
def UpperCAmelCase_ (__a : float , __a : float ):
"""simple docstring"""
if initial_intensity < 0:
raise ValueError('The value of intensity cannot be negative' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_6_0:
raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(__a ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
| 5 |
'''simple docstring'''
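# fmt: off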
__lowerCAmelCase = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
__lowerCAmelCase = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = 'Morse code here!'
print(__a )
_a : Tuple = encrypt(__a )
print(__a )
_a : str = decrypt(__a )
print(__a )
if __name__ == "__main__":
main()
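# A self-contained round-trip sketch of the encode/decode pair above, on a
# tiny made-up subset of the Morse table: letters map to dot/dash codes joined
# by spaces, and the reversed table restores the text.
table = {"S": "...", "O": "---"}
reverse = {v: k for k, v in table.items()}
encoded = " ".join(table[c] for c in "SOS")
assert "".join(reverse[c] for c in encoded.split()) == "SOS"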
| 5 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Tuple = '''vit'''
def __init__( self : Union[str, Any] ,_a : Optional[Any]=768 ,_a : Optional[Any]=12 ,_a : Optional[int]=12 ,_a : Union[str, Any]=3072 ,_a : Optional[int]="gelu" ,_a : Dict=0.0 ,_a : List[Any]=0.0 ,_a : str=0.02 ,_a : Optional[Any]=1E-12 ,_a : str=224 ,_a : Any=16 ,_a : List[str]=3 ,_a : Tuple=True ,_a : Optional[int]=16 ,**_a : Union[str, Any] ,):
'''simple docstring'''
super().__init__(**_a )
_a : List[Any] = hidden_size
_a : Tuple = num_hidden_layers
_a : Dict = num_attention_heads
_a : int = intermediate_size
_a : List[Any] = hidden_act
_a : List[str] = hidden_dropout_prob
_a : Tuple = attention_probs_dropout_prob
_a : List[str] = initializer_range
_a : str = layer_norm_eps
_a : List[str] = image_size
_a : Union[str, Any] = patch_size
_a : Tuple = num_channels
_a : Optional[Any] = qkv_bias
_a : List[str] = encoder_stride
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = version.parse('''1.11''' )
@property
def __lowercase ( self : str ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return 1E-4
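# A note on the geometry implied by the defaults above: a 224x224 image cut
# into 16x16 patches yields (224 // 16) ** 2 = 196 patch tokens, to which ViT
# prepends one [CLS] token.
image_size, patch_size = 224, 16
assert (image_size // patch_size) ** 2 == 196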
| 5 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-config' )
except HTTPError:
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('test-config' ,use_auth_token=self._token )
_a : Optional[Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a ,repo_id='test-config' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Tuple = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token )
_a : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='valid_org/test-config-org' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
_a : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'} )
_a : int = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" ,trust_remote_code=_a )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,'CustomConfig' )
self.assertEqual(new_config.attribute ,42 )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_a : int = c.n_embd + 1 # int
_a : str = c.resid_pdrop + 1.0 # float
_a : Dict = not c.scale_attn_weights # bool
_a : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(_a ,c.n_embd ,'mismatch for key: n_embd' )
self.assertEqual(_a ,c.resid_pdrop ,'mismatch for key: resid_pdrop' )
self.assertEqual(_a ,c.scale_attn_weights ,'mismatch for key: scale_attn_weights' )
self.assertEqual(_a ,c.summary_type ,'mismatch for key: summary_type' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : int = PretrainedConfig()
_a : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_a ,['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_a : Dict = [key for key, value in config_common_kwargs.items() if value == getattr(_a ,_a )]
if len(_a ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F""" {', '.join(_a )}.""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaises(_a ):
# config is in subfolder, the following should not work without specifying the subfolder
_a : List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_a : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ,subfolder='bert' )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[Any] = mock.Mock()
_a : Any = 500
_a : Any = {}
_a : Any = HTTPError
_a : List[Any] = {}
# Download this model to make sure it's in the cache.
_a : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' ,return_value=_a ) as mock_head:
_a : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that we did call the fake head request
mock_head.assert_called()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = AutoConfig.from_pretrained('bert-base-cased' )
_a : List[str] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_a )
_a : str = 2
json.dump(configuration.to_dict() ,open(os.path.join(_a ,'config.4.0.0.json' ) ,'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_a : Tuple = ['config.42.0.0.json']
_a : int = 768
configuration.save_pretrained(_a )
shutil.move(os.path.join(_a ,'config.4.0.0.json' ) ,os.path.join(_a ,'config.42.0.0.json' ) )
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,768 )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
_a : Optional[int] = 'v4.0.0'
_a, _a : Tuple = new_transformers.models.auto.AutoConfig.from_pretrained(
_a ,return_unused_kwargs=_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_a ,{} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_a : str = 'v3.0.0'
_a : Optional[Any] = old_transformers.models.auto.AutoConfig.from_pretrained(_a )
self.assertEqual(old_configuration.hidden_size ,768 )
| 5 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def UpperCAmelCase_ (__a : NDArray[floataa] , __a : NDArray[floataa] , __a : list[int] , __a : int , ):
"""simple docstring"""
_a, _a : Any = coefficient_matrix.shape
_a, _a : Any = constant_matrix.shape
if rowsa != colsa:
_a : str = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(__a )
if colsa != 1:
_a : Tuple = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(__a )
if rowsa != rowsa:
_a : List[str] = (
'Coefficient and constant matrices dimensions must be nxn and nx1 but '
f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(__a )
if len(__a ) != rowsa:
_a : Optional[Any] = (
'Number of initial values must be equal to number of rows in coefficient '
f"""matrix but received {len(__a )} and {rowsa}"""
)
raise ValueError(__a )
if iterations <= 0:
raise ValueError('Iterations must be at least 1' )
_a : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
_a, _a : Dict = table.shape
strictly_diagonally_dominant(__a )
# Iterates the whole matrix for given number of times
for _ in range(__a ):
_a : Any = []
for row in range(__a ):
_a : int = 0
for col in range(__a ):
if col == row:
_a : List[str] = table[row][col]
elif col == cols - 1:
_a : Tuple = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
_a : List[str] = (temp + val) / denom
new_val.append(__a )
_a : Union[str, Any] = new_val
return [float(__a ) for i in new_val]
def UpperCAmelCase_ (__a : NDArray[floataa] ):
"""simple docstring"""
_a, _a : Optional[Any] = table.shape
_a : List[Any] = True
for i in range(0 , __a ):
_a : Union[str, Any] = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
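# A compact, self-contained Jacobi iteration on a tiny strictly diagonally
# dominant system, using the same update as above:
#   x_i <- (b_i - sum_{j != i} a_ij * x_j) / a_ii.
import numpy as np
A = np.array([[4.0, 1.0], [2.0, 5.0]])
b = np.array([9.0, 12.0])
x = np.zeros(2)
for _ in range(25):
    x = (b - (A - np.diag(np.diag(A))) @ x) / np.diag(A)
print(x)  # converges to approximately [1.8333, 1.6667]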
| 5 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__lowerCAmelCase = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
__lowerCAmelCase = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def UpperCAmelCase_ (__a : Dict ):
"""simple docstring"""
_a : List[Any] = list(state_dict.keys() )
for name in state_dict_keys:
_a : List[Any] = state_dict.pop(__a )
# emb -> embedding
if name.startswith('emb.' ):
_a : List[str] = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
_a : Dict = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
_a : int = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , __a )
# ffn -> feed_forward
_a : str = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , __a )
        # time_mix_k -> time_mix_key
if name.endswith('.time_mix_k' ):
_a : Any = name.replace('.time_mix_k' , '.time_mix_key' )
        # time_mix_v -> time_mix_value
if name.endswith('.time_mix_v' ):
_a : int = name.replace('.time_mix_v' , '.time_mix_value' )
        # time_mix_r -> time_mix_receptance
if name.endswith('.time_mix_r' ):
_a : Tuple = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
_a : Tuple = 'rwkv.' + name
_a : List[Any] = weight
return state_dict
def UpperCAmelCase_ (__a : Tuple , __a : Union[str, Any] , __a : List[str] , __a : str=None , __a : List[str]=None , __a : int=False , __a : int=None ):
"""simple docstring"""
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
_a : List[Any] = 5_0_2_7_7
_a : Optional[Any] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
_a : Optional[Any] = PreTrainedTokenizerFast(tokenizer_file=__a )
_a : List[Any] = len(__a )
tokenizer.save_pretrained(__a )
# 2. Build the config
_a : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_a : str = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
_a : str = RwkvConfig(
vocab_size=__a , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__a )
# 3. Download model file then convert state_dict
_a : Tuple = hf_hub_download(__a , __a )
_a : Optional[int] = torch.load(__a , map_location='cpu' )
_a : Dict = convert_state_dict(__a )
# 4. Split in shards and save
_a, _a : List[Any] = shard_checkpoint(__a )
for shard_file, shard in shards.items():
torch.save(__a , os.path.join(__a , __a ) )
if index is not None:
_a : Dict = os.path.join(__a , __a )
# Save the index as well
with open(__a , 'w' , encoding='utf-8' ) as f:
_a : List[Any] = json.dumps(__a , indent=2 , sort_keys=__a ) + '\n'
f.write(__a )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may fail with an OOM error; if this is the case, don\'t worry, you still have converted the model.' )
_a : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_a : Optional[Any] = torch.load(os.path.join(__a , __a ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__a , __a ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
_a : List[str] = AutoModelForCausalLM.from_pretrained(__a )
model.push_to_hub(__a , max_shard_size='2GB' )
tokenizer.push_to_hub(__a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
__lowerCAmelCase = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
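# --- Added usage sketch (assumption; the script, repo and checkpoint names
# below are illustrative, not taken from the snippet above) ---
# The converter above is driven from the command line, e.g.:
# python convert_rwkv_checkpoint_to_hf.py \
#     --repo_id BlinkDL/rwkv-4-pile-169m \
#     --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#     --output_dir ./rwkv-169m-hf \
#     --size 169M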
| 5 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] ,_a : Optional[int] ,_a : Union[str, Any]=2 ,_a : Tuple=3 ,_a : str=4 ,_a : Union[str, Any]=2 ,_a : Union[str, Any]=7 ,_a : str=True ,_a : Union[str, Any]=True ,_a : Dict=True ,_a : Optional[int]=True ,_a : Tuple=99 ,_a : List[str]=36 ,_a : List[Any]=2 ,_a : Optional[Any]=4 ,_a : Optional[int]=37 ,_a : Optional[Any]="gelu" ,_a : Optional[Any]=0.1 ,_a : Any=0.1 ,_a : Optional[int]=512 ,_a : Dict=16 ,_a : Optional[int]=2 ,_a : Dict=0.02 ,_a : Dict=6 ,_a : List[Any]=6 ,_a : Union[str, Any]=3 ,_a : Any=4 ,_a : Tuple=None ,_a : Tuple=1000 ,):
'''simple docstring'''
_a : Tuple = parent
_a : Union[str, Any] = batch_size
_a : str = num_channels
_a : Optional[Any] = image_size
_a : Dict = patch_size
_a : List[str] = is_training
_a : Any = use_input_mask
_a : Optional[Any] = use_token_type_ids
_a : List[Any] = use_labels
_a : Any = vocab_size
_a : Tuple = hidden_size
_a : List[Any] = num_hidden_layers
_a : Union[str, Any] = num_attention_heads
_a : Dict = intermediate_size
_a : List[Any] = hidden_act
_a : Any = hidden_dropout_prob
_a : Optional[int] = attention_probs_dropout_prob
_a : int = max_position_embeddings
_a : Optional[int] = type_vocab_size
_a : Tuple = type_sequence_label_size
_a : Any = initializer_range
_a : Optional[Any] = coordinate_size
_a : Any = shape_size
_a : Union[str, Any] = num_labels
_a : Any = num_choices
_a : Dict = scope
_a : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_a : Dict = text_seq_length
_a : Union[str, Any] = (image_size // patch_size) ** 2 + 1
_a : Dict = self.text_seq_length + self.image_seq_length
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )
_a : Dict = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
_a : str = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_a : str = bbox[i, j, 3]
_a : List[str] = bbox[i, j, 1]
_a : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
_a : Dict = bbox[i, j, 2]
_a : Optional[int] = bbox[i, j, 0]
_a : Optional[int] = tmp_coordinate
_a : List[Any] = tf.constant(_a )
_a : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Dict = None
if self.use_input_mask:
_a : str = random_attention_mask([self.batch_size, self.text_seq_length] )
_a : Tuple = None
if self.use_token_type_ids:
_a : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )
_a : Union[str, Any] = None
_a : Any = None
if self.use_labels:
_a : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )
_a : Dict = LayoutLMvaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowercase ( self : Optional[Any] ,_a : Any ,_a : List[Any] ,_a : List[str] ,_a : int ,_a : int ,_a : Optional[Any] ):
'''simple docstring'''
_a : Union[str, Any] = TFLayoutLMvaModel(config=_a )
# text + image
_a : List[str] = model(_a ,pixel_values=_a ,training=_a )
_a : Any = model(
_a ,bbox=_a ,pixel_values=_a ,attention_mask=_a ,token_type_ids=_a ,training=_a ,)
_a : str = model(_a ,bbox=_a ,pixel_values=_a ,training=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# text only
_a : Tuple = model(_a ,training=_a )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_a : Optional[int] = model({'pixel_values': pixel_values} ,training=_a )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowercase ( self : Tuple ,_a : int ,_a : Tuple ,_a : List[str] ,_a : Optional[int] ,_a : List[str] ,_a : Union[str, Any] ,_a : Tuple ):
'''simple docstring'''
_a : Optional[Any] = self.num_labels
_a : List[Any] = TFLayoutLMvaForSequenceClassification(config=_a )
_a : List[Any] = model(
_a ,bbox=_a ,pixel_values=_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a ,training=_a ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : List[Any] ,_a : List[Any] ,_a : List[str] ,_a : Optional[int] ,_a : Tuple ,_a : Dict ,_a : Union[str, Any] ,_a : Optional[Any] ):
'''simple docstring'''
_a : Any = self.num_labels
_a : List[Any] = TFLayoutLMvaForTokenClassification(config=_a )
_a : Any = model(
_a ,bbox=_a ,pixel_values=_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a ,training=_a ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )
def __lowercase ( self : List[Any] ,_a : int ,_a : Tuple ,_a : List[Any] ,_a : str ,_a : List[str] ,_a : Tuple ,_a : int ):
'''simple docstring'''
_a : List[Any] = 2
_a : Tuple = TFLayoutLMvaForQuestionAnswering(config=_a )
_a : List[str] = model(
_a ,bbox=_a ,pixel_values=_a ,attention_mask=_a ,token_type_ids=_a ,start_positions=_a ,end_positions=_a ,training=_a ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Tuple = self.prepare_config_and_inputs()
((_a), (_a), (_a), (_a), (_a), (_a), (_a), (_a)) : List[str] = config_and_inputs
_a : List[Any] = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
__UpperCAmelCase : Optional[Any] = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Any = False
def __lowercase ( self : Optional[Any] ,_a : Optional[Any] ,_a : Dict ,_a : Any ,_a : Dict ,_a : List[str] ):
'''simple docstring'''
return True
def __lowercase ( self : Any ,_a : List[Any] ,_a : List[Any] ,_a : Tuple=False ):
'''simple docstring'''
_a : Optional[Any] = copy.deepcopy(_a )
if model_class in get_values(_a ):
_a : Optional[int] = {
k: tf.tile(tf.expand_dims(_a ,1 ) ,(1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_a ,tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_a ):
_a : Union[str, Any] = tf.ones(self.model_tester.batch_size ,dtype=tf.intaa )
elif model_class in get_values(_a ):
_a : List[str] = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa )
_a : Optional[int] = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa )
elif model_class in get_values(_a ):
_a : Tuple = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa )
elif model_class in get_values(_a ):
_a : Optional[Any] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=tf.intaa )
return inputs_dict
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Any = TFLayoutLMvaModelTester(self )
_a : List[Any] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a, _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = model_class(_a )
if getattr(_a ,'hf_compute_loss' ,_a ):
# The number of elements in the loss should be the same as the number of elements in the label
_a : Optional[int] = self._prepare_for_class(inputs_dict.copy() ,_a ,return_labels=_a )
_a : int = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() ,reverse=_a )[0]
]
_a : Any = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
_a : str = self._prepare_for_class(inputs_dict.copy() ,_a ,return_labels=_a )
_a : Optional[Any] = prepared_for_class.pop('input_ids' )
_a : List[str] = model(_a ,**_a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
_a : List[str] = self._prepare_for_class(inputs_dict.copy() ,_a ,return_labels=_a )
_a : int = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
_a : int = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
_a : List[Any] = -100
_a : Tuple = tf.convert_to_tensor(_a )
_a : List[Any] = model(_a ,**_a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
_a : Optional[Any] = self._prepare_for_class(inputs_dict.copy() ,_a ,return_labels=_a )
_a : Optional[int] = model(_a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
_a : str = self._prepare_for_class(inputs_dict.copy() ,_a ,return_labels=_a )
# Get keys that were added with the _prepare_for_class function
_a : Union[str, Any] = prepared_for_class.keys() - inputs_dict.keys()
_a : List[Any] = inspect.signature(model.call ).parameters
_a : int = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
_a : Any = {0: 'input_ids'}
for label_key in label_keys:
_a : Tuple = signature_names.index(_a )
_a : Union[str, Any] = label_key
_a : List[str] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
_a : str = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
_a : Any = prepared_for_class[value]
_a : Any = tuple(_a )
# Send to model
_a : Any = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __lowercase ( self : int ):
'''simple docstring'''
(
(
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
),
) : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_a ,_a ,_a ,_a ,_a ,_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
(
(
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
),
) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a : int = type
self.model_tester.create_and_check_model(_a ,_a ,_a ,_a ,_a ,_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
(
(
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
),
) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
_a ,_a ,_a ,_a ,_a ,_a ,_a )
def __lowercase ( self : str ):
'''simple docstring'''
(
(
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
),
) : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
_a ,_a ,_a ,_a ,_a ,_a ,_a )
def __lowercase ( self : Any ):
'''simple docstring'''
(
(
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
),
) : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
_a ,_a ,_a ,_a ,_a ,_a ,_a )
@slow
def __lowercase ( self : int ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Optional[Any] = TFLayoutLMvaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=_a ) if is_vision_available() else None
@slow
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[int] = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
_a : List[str] = self.default_image_processor
_a : Optional[Any] = prepare_img()
_a : Dict = image_processor(images=_a ,return_tensors='tf' ).pixel_values
_a : List[str] = tf.constant([[1, 2]] )
_a : Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) ,axis=0 )
# forward pass
_a : Any = model(input_ids=_a ,bbox=_a ,pixel_values=_a ,training=_a )
# verify the logits
_a : Tuple = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape ,_a )
_a : Tuple = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] ,_a ,atol=1E-4 ) )
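# --- Added sketch (assumption): a vectorized equivalent of the bbox
# "legalization" loop in the model tester above, ensuring x0 <= x1 and
# y0 <= y1 for every box by sorting each coordinate pair.
import numpy as np

bbox = np.random.randint(0, 1000, size=(2, 7, 4))
bbox[..., [0, 2]] = np.sort(bbox[..., [0, 2]], axis=-1)  # x coordinates
bbox[..., [1, 3]] = np.sort(bbox[..., [1, 3]], axis=-1)  # y coordinates
assert (bbox[..., 2] >= bbox[..., 0]).all() and (bbox[..., 3] >= bbox[..., 1]).all()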
| 5 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
__lowerCAmelCase = datasets.logging.get_logger(__name__)
__lowerCAmelCase = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'sources': datasets.Value('string' ,id='sequence' ),
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
] ,)
def __lowercase ( self : int ,_a : int ):
'''simple docstring'''
if self.config_name == "default":
_a : List[Any] = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
else:
_a : List[str] = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Dict ,_a : Optional[Any] ,_a : List[str]=None ,_a : Tuple=False ):
'''simple docstring'''
if gpus is None:
_a : str = 1 if torch.cuda.is_available() else 0
_a : Optional[Any] = {'src': sources, 'mt': predictions, 'ref': references}
_a : Optional[Any] = [dict(zip(_a ,_a ) ) for t in zip(*data.values() )]
_a, _a : Tuple = self.scorer.predict(_a ,gpus=_a ,progress_bar=_a )
return {"mean_score": mean_score, "scores": scores}
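# --- Added illustration (assumption) of the columns-to-rows reshaping done in
# `_compute` above before the data is handed to the COMET scorer.
data = {"src": ["s1", "s2"], "mt": ["m1", "m2"], "ref": ["r1", "r2"]}
rows = [dict(zip(data, t)) for t in zip(*data.values())]
print(rows)  # [{'src': 's1', 'mt': 'm1', 'ref': 'r1'}, {'src': 's2', 'mt': 'm2', 'ref': 'r2'}]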
| 5 | 1 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__lowerCAmelCase = """sshleifer/bart-tiny-random"""
__lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return AutoConfig.from_pretrained(_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a, *_a : Any = create_student_by_copying_alternating_layers(_a ,tempfile.mkdtemp() ,e=1 ,d=1 )
self.assertEqual(student.config.num_hidden_layers ,1 )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a, *_a : int = create_student_by_copying_alternating_layers(_a ,tempfile.mkdtemp() ,e=1 ,d=_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a, *_a : Optional[Any] = create_student_by_copying_alternating_layers(_a ,tempfile.mkdtemp() ,e=1 ,d=_a )
self.assertEqual(student.config.encoder_layers ,1 )
self.assertEqual(student.config.decoder_layers ,self.teacher_config.encoder_layers )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a, *_a : Union[str, Any] = create_student_by_copying_alternating_layers(_a ,tempfile.mkdtemp() ,e=1 ,d=1 )
self.assertEqual(student.config.encoder_layers ,1 )
self.assertEqual(student.config.decoder_layers ,1 )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
with self.assertRaises(_a ):
create_student_by_copying_alternating_layers(_a ,tempfile.mkdtemp() ,e=_a ,d=_a )
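# --- Added sketch (assumption): a simplified stand-in for the layer-selection
# idea these tests exercise — a student with `e` encoder layers copies evenly
# spaced teacher layers. The real helper may pick layers differently.
teacher_layers = list(range(12))
e = 6  # desired number of student layers
step = len(teacher_layers) // e
student_layers = teacher_layers[::step][:e]
print(student_layers)  # [0, 2, 4, 6, 8, 10]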
| 5 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
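# --- Added sketch (assumption): the PEP 562 lazy-import pattern that
# `_LazyModule` builds on — names resolve to real imports only on first
# attribute access. Save as e.g. lazy_demo.py, then:
#   import lazy_demo; lazy_demo.sqrt(4.0)
import importlib

_ATTR_TO_MODULE = {"sqrt": "math"}  # toy mapping: exported name -> source module

def __getattr__(name):
    if name in _ATTR_TO_MODULE:
        return getattr(importlib.import_module(_ATTR_TO_MODULE[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")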
| 5 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCAmelCase = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 |
'''simple docstring'''
import sys
def UpperCAmelCase_ (__a : List[str] ):
"""simple docstring"""
_a : List[str] = len(__a )
_a : Dict = [[0 for x in range(__a )] for x in range(__a )]
_a : Union[str, Any] = [[0 for x in range(__a )] for x in range(__a )]
for chain_length in range(2 , __a ):
for a in range(1 , n - chain_length + 1 ):
_a : Tuple = a + chain_length - 1
_a : Any = sys.maxsize
for c in range(__a , __a ):
_a : Optional[Any] = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
_a : Dict = cost
_a : Any = c
return matrix, sol
def UpperCAmelCase_ (__a : Tuple , __a : List[str] , __a : Dict ):
"""simple docstring"""
if i == j:
print('A' + str(__a ) , end=' ' )
else:
print('(' , end=' ' )
print_optiomal_solution(__a , __a , optimal_solution[i][j] )
print_optiomal_solution(__a , optimal_solution[i][j] + 1 , __a )
print(')' , end=' ' )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
_a : Any = len(__a )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
_a, _a : Union[str, Any] = matrix_chain_order(__a )
    print('No. of Operations required: ' + str(matrix[1][n - 1] ) )
print_optiomal_solution(__a , 1 , n - 1 )
if __name__ == "__main__":
main()
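# --- Added cross-check (assumption): a memoized top-down recursion computing
# the same minimal multiplication cost as the bottom-up table above.
from functools import lru_cache

dims = [30, 35, 15, 5, 10, 20, 25]

@lru_cache(maxsize=None)
def best(i: int, j: int) -> int:
    # minimal cost of multiplying matrices A_i ... A_j, where A_k is dims[k-1] x dims[k]
    if i == j:
        return 0
    return min(
        best(i, k) + best(k + 1, j) + dims[i - 1] * dims[k] * dims[j]
        for k in range(i, j)
    )

print(best(1, len(dims) - 1))  # 15125, matching matrix[1][n - 1] above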
| 5 | 1 |
'''simple docstring'''
def UpperCAmelCase_ (__a : int = 1_0**1_2 ):
"""simple docstring"""
_a : List[str] = 1
_a : Optional[int] = 0
_a : Any = 1
_a : List[str] = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
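# --- Added sanity check (assumption; the snippet's names are obfuscated) ---
# For a box of at least 21 discs, the first exact 50/50 arrangement has
# 85 blue of 120 total discs, since 85/120 * 84/119 == 1/2, so solution(21)
# is expected to return 85.
assert 85 * 84 * 2 == 120 * 119  # the 50/50 condition for (85, 120)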
| 5 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase_ (__a : Optional[Any] ):
"""simple docstring"""
_a : int = FileLock(str(tmpdir / 'foo.lock' ) )
_a : List[Any] = FileLock(str(tmpdir / 'foo.lock' ) )
_a : Any = 0.01
with locka.acquire():
with pytest.raises(__a ):
_a : int = time.time()
locka.acquire(__a )
assert time.time() - _start > timeout
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Dict = 'a' * 1_0_0_0 + '.lock'
_a : int = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('.lock' )
assert not locka._lock_file.endswith(__a )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
_a : Dict = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__a ):
locka.acquire(0 )
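# --- Added usage sketch (assumption): the basic FileLock pattern the tests
# above exercise — a bounded wait that raises Timeout if the lock is held.
from datasets.utils.filelock import FileLock

lock = FileLock("demo.lock", timeout=1)  # wait at most 1 second for the lock
with lock:
    pass  # critical section; a second holder would hit Timeout here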
| 5 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class UpperCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = BlenderbotConfig
__UpperCAmelCase : Tuple = {}
__UpperCAmelCase : int = '''gelu'''
def __init__( self : Union[str, Any] ,_a : Union[str, Any] ,_a : Tuple=13 ,_a : Dict=7 ,_a : List[Any]=True ,_a : Optional[Any]=False ,_a : Optional[Any]=99 ,_a : List[Any]=32 ,_a : Optional[int]=2 ,_a : List[Any]=4 ,_a : List[Any]=37 ,_a : Tuple=0.1 ,_a : str=0.1 ,_a : List[Any]=20 ,_a : List[str]=2 ,_a : Union[str, Any]=1 ,_a : Optional[int]=0 ,):
'''simple docstring'''
_a : Optional[int] = parent
_a : str = batch_size
_a : List[Any] = seq_length
_a : Dict = is_training
_a : List[str] = use_labels
_a : List[Any] = vocab_size
_a : Tuple = hidden_size
_a : Dict = num_hidden_layers
_a : str = num_attention_heads
_a : int = intermediate_size
_a : Optional[int] = hidden_dropout_prob
_a : Tuple = attention_probs_dropout_prob
_a : Optional[Any] = max_position_embeddings
_a : Union[str, Any] = eos_token_id
_a : int = pad_token_id
_a : Optional[int] = bos_token_id
def __lowercase ( self : int ):
'''simple docstring'''
_a : Dict = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
_a : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
_a : int = tf.concat([input_ids, eos_tensor] ,axis=1 )
_a : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_a : str = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
_a : List[Any] = prepare_blenderbot_inputs_dict(_a ,_a ,_a )
return config, inputs_dict
def __lowercase ( self : List[str] ,_a : Optional[Any] ,_a : int ):
'''simple docstring'''
_a : List[Any] = TFBlenderbotModel(config=_a ).get_decoder()
_a : Dict = inputs_dict['input_ids']
_a : List[str] = input_ids[:1, :]
_a : Any = inputs_dict['attention_mask'][:1, :]
_a : Dict = inputs_dict['head_mask']
_a : Dict = 1
# first forward pass
_a : Optional[int] = model(_a ,attention_mask=_a ,head_mask=_a ,use_cache=_a )
_a, _a : str = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_a : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
_a : Tuple = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
# append to next input_ids and
_a : Dict = tf.concat([input_ids, next_tokens] ,axis=-1 )
_a : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
_a : int = model(_a ,attention_mask=_a )[0]
_a : List[str] = model(_a ,attention_mask=_a ,past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
_a : Dict = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
_a : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_a : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a ,_a ,rtol=1E-3 )
def UpperCAmelCase_ (__a : Optional[int] , __a : Optional[Any] , __a : str , __a : Optional[int]=None , __a : Any=None , __a : Any=None , __a : str=None , __a : List[Any]=None , ):
"""simple docstring"""
if attention_mask is None:
_a : Dict = tf.cast(tf.math.not_equal(__a , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_a : Union[str, Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_a : Optional[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_a : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_a : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__UpperCAmelCase : Dict = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__UpperCAmelCase : Union[str, Any] = (
{
'''conversational''': TFBlenderbotForConditionalGeneration,
'''feature-extraction''': TFBlenderbotModel,
'''summarization''': TFBlenderbotForConditionalGeneration,
'''text2text-generation''': TFBlenderbotForConditionalGeneration,
'''translation''': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Dict = False
__UpperCAmelCase : int = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Optional[int] = TFBlenderbotModelTester(self )
_a : List[str] = ConfigTester(self ,config_class=_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
@require_tokenizers
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = ['''My friends are cool but they eat too many carbs.''']
__UpperCAmelCase : Tuple = '''facebook/blenderbot-400M-distill'''
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = self.tokenizer(self.src_text ,return_tensors='tf' )
_a : List[Any] = self.model.generate(
model_inputs.input_ids ,)
_a : Dict = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=_a )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 5 |
'''simple docstring'''
def UpperCAmelCase_ (__a : int = 1_0**1_2 ):
"""simple docstring"""
_a : List[str] = 1
_a : Optional[int] = 0
_a : Any = 1
_a : List[str] = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = LxmertTokenizer
__UpperCAmelCase : int = LxmertTokenizerFast
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : List[Any] = True
def __lowercase ( self : Dict ):
'''simple docstring'''
super().setUp()
_a : Dict = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_a : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __lowercase ( self : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
_a : int = 'UNwant\u00E9d,running'
_a : List[Any] = 'unwanted, running'
return input_text, output_text
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : str = self.tokenizer_class(self.vocab_file )
_a : Optional[int] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_a ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,[7, 4, 5, 10, 8, 9] )
def __lowercase ( self : Any ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_a : str = self.get_tokenizer()
_a : Any = self.get_rust_tokenizer()
_a : List[str] = 'I was born in 92000, and this is falsé.'
_a : List[Any] = tokenizer.tokenize(_a )
_a : Optional[int] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
_a : Optional[int] = tokenizer.encode(_a ,add_special_tokens=_a )
_a : Optional[Any] = rust_tokenizer.encode(_a ,add_special_tokens=_a )
self.assertListEqual(_a ,_a )
_a : Union[str, Any] = self.get_rust_tokenizer()
_a : Dict = tokenizer.encode(_a )
_a : Optional[Any] = rust_tokenizer.encode(_a )
self.assertListEqual(_a ,_a )
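# --- Added sketch (assumption): the greedy longest-match WordPiece step the
# vocabulary above is built for, e.g. "unwanted" -> ["un", "##want", "##ed"].
vocab = {"[UNK]", "un", "##want", "##ed", "want", "##ing", "runn"}

def wordpiece(word: str) -> list:
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:  # try the longest remaining prefix first
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:  # no sub-token matched at this position
            return ["[UNK]"]
        start = end
    return pieces

print(wordpiece("unwanted"))  # ['un', '##want', '##ed']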
| 5 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
__lowerCAmelCase = {"""mobilebert-uncased""": 5_1_2}
__lowerCAmelCase = {}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[Any] = MobileBertTokenizer
def __init__( self : Dict ,_a : List[Any]=None ,_a : Optional[Any]=None ,_a : Union[str, Any]=True ,_a : Dict="[UNK]" ,_a : Union[str, Any]="[SEP]" ,_a : Any="[PAD]" ,_a : Optional[int]="[CLS]" ,_a : Optional[Any]="[MASK]" ,_a : Dict=True ,_a : Any=None ,**_a : Optional[Any] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
_a : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_a ) != do_lower_case
or normalizer_state.get('strip_accents' ,_a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_a ) != tokenize_chinese_chars
):
_a : Optional[Any] = getattr(_a ,normalizer_state.pop('type' ) )
_a : Dict = do_lower_case
_a : str = strip_accents
_a : Tuple = tokenize_chinese_chars
_a : Optional[Any] = normalizer_class(**_a )
_a : str = do_lower_case
def __lowercase ( self : Tuple ,_a : Union[str, Any] ,_a : List[str]=None ):
'''simple docstring'''
_a : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : List[str] = [self.sep_token_id]
_a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
_a : int = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
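# --- Added usage sketch (assumption; downloads the tokenizer files from the Hub) ---
from transformers import MobileBertTokenizerFast

tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
print(tok("hello world")["input_ids"])  # [CLS] ... [SEP] ids built by the methods above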
| 5 | 1 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase = """docs/source/en/_toctree.yml"""
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Any = defaultdict(__a )
for doc in model_doc:
counts[doc["local"]] += 1
_a : List[str] = [key for key, value in counts.items() if value > 1]
_a : str = []
for duplicate_key in duplicates:
_a : Union[str, Any] = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
    return sorted(__a , key=lambda s : s["title"].lower() )
def UpperCAmelCase_ (__a : Optional[int]=False ):
"""simple docstring"""
with open(__a , encoding='utf-8' ) as f:
_a : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
_a : Union[str, Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_a : Union[str, Any] = content[api_idx]['sections']
# Then to the model doc
_a : List[str] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
_a : List[str] = api_doc[model_idx]['sections']
_a : List[Any] = [(idx, section) for idx, section in enumerate(__a ) if 'sections' in section]
_a : Tuple = False
for idx, modality_doc in modalities_docs:
_a : List[Any] = modality_doc['sections']
_a : Any = clean_model_doc_toc(__a )
if old_modality_doc != new_modality_doc:
_a : Union[str, Any] = True
if overwrite:
_a : str = new_modality_doc
if diff:
if overwrite:
_a : Dict = model_doc
_a : Dict = api_doc
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__lowerCAmelCase = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 5 |
'''simple docstring'''
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : List[Any] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
_a : Optional[int] = ''
_a : List[str] = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__a ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
_a, _a : Optional[int] = 0, 0
# length[i] shows the length of palindromic substring with center i
_a : Optional[Any] = [1 for i in range(len(__a ) )]
# for each character in new_string find corresponding palindromic string
_a : Dict = 0
for j in range(len(__a ) ):
_a : Dict = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__a )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_a : Optional[int] = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
_a : str = j - k + 1 # noqa: E741
_a : Any = j + k - 1
# update max_length and start position
if max_length < length[j]:
_a : Union[str, Any] = length[j]
_a : List[str] = j
# create that string
_a : Tuple = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
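# --- Added cross-check (assumption): a tiny brute-force reference that the
# Manacher-style routine above should agree with on small inputs.
def longest_palindrome_bruteforce(s: str) -> str:
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            sub = s[i : j + 1]
            if sub == sub[::-1] and len(sub) > len(best):
                best = sub
    return best

print(longest_palindrome_bruteforce("forgeeksskeegfor"))  # geeksskeeg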
| 5 | 1 |
'''simple docstring'''
from manim import *
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[str] = Rectangle(height=0.5 ,width=0.5 )
_a : str = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_a : List[str] = Rectangle(height=0.25 ,width=0.25 )
_a : Dict = [mem.copy() for i in range(6 )]
_a : Tuple = [mem.copy() for i in range(6 )]
_a : Tuple = VGroup(*_a ).arrange(_a ,buff=0 )
_a : List[str] = VGroup(*_a ).arrange(_a ,buff=0 )
_a : List[Any] = VGroup(_a ,_a ).arrange(_a ,buff=0 )
_a : List[str] = Text('CPU' ,font_size=24 )
_a : Union[str, Any] = Group(_a ,_a ).arrange(_a ,buff=0.5 ,aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_a : Optional[Any] = [mem.copy() for i in range(4 )]
_a : Union[str, Any] = VGroup(*_a ).arrange(_a ,buff=0 )
_a : Tuple = Text('GPU' ,font_size=24 )
_a : Optional[int] = Group(_a ,_a ).arrange(_a ,buff=0.5 ,aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_a : str = [mem.copy() for i in range(6 )]
_a : str = VGroup(*_a ).arrange(_a ,buff=0 )
_a : int = Text('Model' ,font_size=24 )
_a : Dict = Group(_a ,_a ).arrange(_a ,buff=0.5 ,aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_a : Dict = []
_a : Any = []
for i, rect in enumerate(_a ):
_a : List[Any] = fill.copy().set_fill(_a ,opacity=0.8 )
target.move_to(_a )
model_arr.append(_a )
_a : str = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_a ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_a )
self.add(*_a ,*_a )
_a : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
_a : List[Any] = [meta_mem.copy() for i in range(6 )]
_a : Tuple = VGroup(*_a ).arrange(_a ,buff=0 )
_a : Tuple = VGroup(*_a ).arrange(_a ,buff=0 )
_a : List[Any] = VGroup(_a ,_a ).arrange(_a ,buff=0 )
_a : List[Any] = Text('Disk' ,font_size=24 )
_a : Any = Group(_a ,_a ).arrange(_a ,buff=0.5 ,aligned_edge=_a )
disk.move_to([-4, -1.25, 0] )
self.add(_a ,_a )
_a : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a : int = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(_a ,_a )
_a : List[Any] = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(_a ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(_a )
_a : Dict = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(_a ) )
_a : Optional[Any] = Square(0.3 )
input.set_fill(_a ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,_a ,buff=0.5 )
self.play(Write(_a ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=_a ,buff=0.02 )
self.play(MoveToTarget(_a ) )
self.play(FadeOut(_a ) )
_a : List[Any] = Arrow(start=_a ,end=_a ,color=_a ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,_a ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_a : int = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(_a ,run_time=3 ) )
_a : List[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_a ) ,Circumscribe(model_arr[0] ,color=_a ,**_a ) ,Circumscribe(model_cpu_arr[0] ,color=_a ,**_a ) ,Circumscribe(gpu_rect[0] ,color=_a ,**_a ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
_a : str = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,_a ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_a : Union[str, Any] = AnimationGroup(
FadeOut(_a ,run_time=0.5 ) ,MoveToTarget(_a ,run_time=0.5 ) ,FadeIn(_a ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(_a )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_a : Any = 0.7
self.play(
Circumscribe(model_arr[i] ,**_a ) ,Circumscribe(cpu_left_col_base[i] ,**_a ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=_a ,**_a ) ,Circumscribe(gpu_rect[0] ,color=_a ,**_a ) ,Circumscribe(model_arr[i + 1] ,color=_a ,**_a ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=_a ,**_a ) ,Circumscribe(cpu_left_col_base[-1] ,color=_a ,**_a ) ,Circumscribe(gpu_rect[0] ,color=_a ,**_a ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
_a : Optional[Any] = a_c
_a : Union[str, Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(_a ) ,FadeOut(_a ,run_time=0.5 ) ,)
_a : Optional[int] = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a ,run_time=3 ) ,MoveToTarget(_a ) )
self.wait()
| 5 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
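# --- Added demonstration (assumption; the names here are illustrative) ---
# `lru_cache` memoizes the recursion, so repeated calls become cache hits.
from functools import lru_cache

@lru_cache
def fact(n: int) -> int:
    return 1 if n in (0, 1) else n * fact(n - 1)

fact(30)
print(fact.cache_info())  # one miss per new argument on the first call
fact(30)
print(fact.cache_info())  # the repeated call registers a single hit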
| 5 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCAmelCase__ ( datasets.BuilderConfig ):
"""simple docstring"""
__UpperCAmelCase : int = 1_0000
__UpperCAmelCase : Optional[List[str]] = None
__UpperCAmelCase : Optional[datasets.Features] = None
class UpperCAmelCase__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ParquetConfig
def __lowercase ( self : int ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def __lowercase ( self : int ,_a : List[Any] ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_a : Tuple = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_a ,(str, list, tuple) ):
_a : int = data_files
if isinstance(_a ,_a ):
_a : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : int = [dl_manager.iter_files(_a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )]
_a : Dict = []
for split_name, files in data_files.items():
if isinstance(_a ,_a ):
_a : Any = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : Optional[int] = [dl_manager.iter_files(_a ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_a ):
with open(_a ,'rb' ) as f:
_a : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_a ) )
break
splits.append(datasets.SplitGenerator(name=_a ,gen_kwargs={'files': files} ) )
return splits
def __lowercase ( self : List[str] ,_a : pa.Table ):
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_a : List[str] = table_cast(_a ,self.info.features.arrow_schema )
return pa_table
def __lowercase ( self : Any ,_a : Optional[int] ):
'''simple docstring'''
_a : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(_a ) ):
with open(_a ,'rb' ) as f:
_a : int = pq.ParquetFile(_a )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size ,columns=self.config.columns ) ):
_a : Tuple = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(_a )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(_a )}: {e}""" )
raise
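# Usage sketch (illustrative; assumes the standard `datasets` loader API):
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "train.parquet"})
# The builder above reads each file in `batch_size`-row record batches and
# casts every batch to the configured features via `_cast_table`.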
| 5 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__lowerCAmelCase = threading.Lock()
__lowerCAmelCase = None
__lowerCAmelCase = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
__lowerCAmelCase = logging.WARNING
__lowerCAmelCase = True
def UpperCAmelCase_ ():
"""simple docstring"""
    _a : Dict = os.getenv('TRANSFORMERS_VERBOSITY' , None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def UpperCAmelCase_ ():
"""simple docstring"""
return __name__.split('.' )[0]
def UpperCAmelCase_ ():
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_a : str = logging.StreamHandler() # Set sys.stderr as stream.
_a : Optional[Any] = sys.stderr.flush
# Apply our default configuration to the library root logger.
_a : List[Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_a : List[str] = False
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
_a : int = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_a : str = None
def UpperCAmelCase_ ():
"""simple docstring"""
return log_levels
def UpperCAmelCase_ (__a : Optional[str] = None ):
"""simple docstring"""
if name is None:
_a : List[Any] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
    return set_verbosity(INFO )
def UpperCAmelCase_ ():
"""simple docstring"""
    return set_verbosity(WARNING )
def UpperCAmelCase_ ():
"""simple docstring"""
    return set_verbosity(DEBUG )
def UpperCAmelCase_ ():
"""simple docstring"""
    return set_verbosity(ERROR )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__a )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Union[str, Any] = False
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Dict = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = _get_library_root_logger().handlers
for handler in handlers:
_a : Union[str, Any] = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
        handler.setFormatter(_a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
        handler.setFormatter(None )
def UpperCAmelCase_ (self : Union[str, Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ):
"""simple docstring"""
    _a : Union[str, Any] = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , False )
if no_advisory_warnings:
return
self.warning(*__a , **__a )
__lowerCAmelCase = warning_advice
@functools.lru_cache(None )
def UpperCAmelCase_ (self : int , *__a : Optional[Any] , **__a : Any ):
"""simple docstring"""
self.warning(*__a , **__a )
__lowerCAmelCase = warning_once
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Any ,*_a : Tuple ,**_a : int ): # pylint: disable=unused-argument
'''simple docstring'''
_a : int = args[0] if args else None
def __iter__( self : str ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[Any] ,_a : int ):
'''simple docstring'''
def empty_fn(*_a : Optional[Any] ,**_a : Any ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : List[str] ):
'''simple docstring'''
return self
def __exit__( self : List[str] ,_a : str ,_a : List[Any] ,_a : str ):
'''simple docstring'''
return
class UpperCAmelCase__ :
"""simple docstring"""
def __call__( self : Union[str, Any] ,*_a : Tuple ,**_a : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*_a ,**_a )
else:
return EmptyTqdm(*_a ,**_a )
def __lowercase ( self : str ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
_a : Any = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_a ,**_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__lowerCAmelCase = _tqdm_cls()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : str = True
hf_hub_utils.enable_progress_bars()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : Dict = False
hf_hub_utils.disable_progress_bars()
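# Usage sketch (illustrative; assumes the upstream public names
# `set_verbosity`/`get_logger` from `transformers.utils.logging`):
#   from transformers.utils import logging
#   logging.set_verbosity(logging.INFO)  # or export TRANSFORMERS_VERBOSITY=info
#   logger = logging.get_logger(__name__)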
| 5 | 1 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : int ,_a : str=None ,_a : int=None ,*_a : Optional[int] ,**_a : List[str] ):
'''simple docstring'''
super().__init__(*_a ,**_a )
if config is None:
assert isinstance(self.model ,_a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
_a : Tuple = self.model.config
else:
_a : Tuple = config
_a : int = data_args
_a : Tuple = self.config.tgt_vocab_size if isinstance(self.config ,_a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
' padding..' )
if self.args.label_smoothing == 0:
_a : List[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_a : str = label_smoothed_nll_loss
def __lowercase ( self : Dict ,_a : int ):
'''simple docstring'''
if self.optimizer is None:
_a : Dict = ['bias', 'LayerNorm.weight']
_a : List[str] = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
_a : List[Any] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_a : List[Any] = Adafactor
_a : Tuple = {'scale_parameter': False, 'relative_step': False}
else:
_a : Tuple = AdamW
_a : int = {
'betas': (self.args.adam_betaa, self.args.adam_betaa),
'eps': self.args.adam_epsilon,
}
_a : Union[str, Any] = self.args.learning_rate
if self.sharded_ddp:
_a : Optional[int] = OSS(
params=_a ,optim=_a ,**_a ,)
else:
_a : Tuple = optimizer_cls(_a ,**_a )
if self.lr_scheduler is None:
_a : int = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def __lowercase ( self : Union[str, Any] ,_a : Dict ):
'''simple docstring'''
_a : Optional[Any] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_a : Any = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_a : int = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
_a : Union[str, Any] = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=_a )
return scheduler
def __lowercase ( self : List[str] ):
'''simple docstring'''
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def __lowercase ( self : List[str] ,_a : Tuple ,_a : Tuple ,_a : int ):
'''simple docstring'''
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_a : Tuple = model(**_a ,use_cache=_a )[0]
_a : int = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute usual loss via models
_a, _a : List[Any] = model(**_a ,labels=_a ,use_cache=_a )[:2]
else:
# compute label smoothed loss
_a : Any = model(**_a ,use_cache=_a )[0]
_a : int = torch.nn.functional.log_softmax(_a ,dim=-1 )
_a, _a : Optional[int] = self.loss_fn(_a ,_a ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def __lowercase ( self : Union[str, Any] ,_a : int ,_a : Any ):
'''simple docstring'''
_a : List[Any] = inputs.pop('labels' )
_a, _a : Any = self._compute_loss(_a ,_a ,_a )
return loss
def __lowercase ( self : List[str] ,_a : nn.Module ,_a : Dict[str, Union[torch.Tensor, Any]] ,_a : bool ,_a : Optional[List[str]] = None ,):
'''simple docstring'''
_a : List[str] = self._prepare_inputs(_a )
_a : List[str] = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_a : List[Any] = self.model.generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**_a ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_a : Union[str, Any] = self._pad_tensors_to_max_len(_a ,gen_kwargs['max_length'] )
_a : Union[str, Any] = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
_a, _a : Tuple = self._compute_loss(_a ,_a ,_a )
_a : Optional[Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_a : Union[str, Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_a : List[str] = self._pad_tensors_to_max_len(_a ,gen_kwargs['max_length'] )
return (loss, logits, labels)
def __lowercase ( self : List[str] ,_a : Optional[int] ,_a : Tuple ):
'''simple docstring'''
_a : str = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F""" padded to `max_length`={max_length}""" )
_a : Dict = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
_a : Optional[int] = tensor
return padded_tensor
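# Shape sketch (illustrative): for `tensor` of shape (batch, seq_len) with
# seq_len < max_length, the helper above is intended to return a
# (batch, max_length) tensor pre-filled with pad_token_id and the original
# values copied into the first seq_len positions.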
| 5 |
'''simple docstring'''
def UpperCAmelCase_ (__a : list[int] , __a : list[int] ):
"""simple docstring"""
if not len(__a ) == len(__a ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_a, _a, _a : Tuple = equationa
_a, _a, _a : str = equationa
# Calculate the determinants of the matrices
_a : Union[str, Any] = aa * ba - aa * ba
_a : List[Any] = ca * ba - ca * ba
_a : List[Any] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_a : int = determinant_x / determinant
_a : List[str] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
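# Worked example (illustrative): for the system x + 2y = 3 and 2x + y = 3,
# i.e. coefficient triples [1, 2, 3] and [2, 1, 3]:
#   determinant   = 1*1 - 2*2 = -3
#   determinant_x = 3*1 - 3*2 = -3  ->  x = 1.0
#   determinant_y = 1*3 - 2*3 = -3  ->  y = 1.0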
| 5 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Dict ,_a : List[Any] ,_a : List[str]=2 ,_a : Optional[Any]=True ,_a : Union[str, Any]=False ,_a : List[Any]=10 ,_a : Optional[int]=3 ,_a : List[str]=32 * 8 ,_a : Optional[Any]=32 * 8 ,_a : int=4 ,_a : Union[str, Any]=64 ,):
'''simple docstring'''
_a : Dict = parent
_a : int = batch_size
_a : Optional[Any] = is_training
_a : Any = use_auxiliary_loss
_a : Any = num_queries
_a : Tuple = num_channels
_a : str = min_size
_a : Union[str, Any] = max_size
_a : Union[str, Any] = num_labels
_a : Union[str, Any] = hidden_dim
_a : List[Any] = hidden_dim
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_a )
_a : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=_a )
_a : str = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=_a ) > 0.5
).float()
_a : str = (torch.rand((self.batch_size, self.num_labels) ,device=_a ) > 0.5).long()
_a : Union[str, Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[Any] = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
_a : Tuple = self.num_queries
_a : str = self.num_labels
_a : List[Any] = [1, 1, 1, 1]
_a : List[Any] = self.num_channels
_a : int = 64
_a : str = 128
_a : List[str] = self.hidden_dim
_a : str = self.hidden_dim
_a : Tuple = self.hidden_dim
return config
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a, _a, _a, _a, _a : Dict = self.prepare_config_and_inputs()
_a : List[str] = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __lowercase ( self : Tuple ,_a : Optional[int] ,_a : List[str] ):
'''simple docstring'''
_a : Any = output.encoder_hidden_states
_a : int = output.pixel_decoder_hidden_states
_a : Union[str, Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) ,config.decoder_layers )
def __lowercase ( self : str ,_a : int ,_a : str ,_a : List[str] ,_a : Optional[Any]=False ):
'''simple docstring'''
with torch.no_grad():
_a : str = MaskaFormerModel(config=_a )
model.to(_a )
model.eval()
_a : List[Any] = model(pixel_values=_a ,pixel_mask=_a )
_a : Optional[int] = model(_a ,output_hidden_states=_a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a ,_a )
def __lowercase ( self : List[str] ,_a : List[str] ,_a : str ,_a : List[Any] ,_a : List[Any] ,_a : Any ):
'''simple docstring'''
_a : List[str] = MaskaFormerForUniversalSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(_a : Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_a : Optional[Any] = model(pixel_values=_a ,pixel_mask=_a )
_a : Union[str, Any] = model(_a )
comm_check_on_output(_a )
_a : List[str] = model(
pixel_values=_a ,pixel_mask=_a ,mask_labels=_a ,class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
__UpperCAmelCase : Tuple = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Any = False
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : List[str] = MaskaFormerModelTester(self )
_a : List[str] = ConfigTester(self ,config_class=_a ,has_text_modality=_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : str ):
'''simple docstring'''
_a, _a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def __lowercase ( self : Tuple ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __lowercase ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
def __lowercase ( self : Dict ):
'''simple docstring'''
_a, _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a )
_a : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : List[str] = [*signature.parameters.keys()]
_a : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_a : List[str] = MaskaFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Optional[int] = (self.model_tester.min_size,) * 2
_a : List[str] = {
'pixel_values': torch.randn((2, 3, *size) ,device=_a ),
'mask_labels': torch.randn((2, 10, *size) ,device=_a ),
'class_labels': torch.zeros(2 ,10 ,device=_a ).long(),
}
_a : str = self.model_tester.get_config()
_a : str = MaskaFormerForUniversalSegmentation(_a ).to(_a )
_a : int = model(**_a )
self.assertTrue(outputs.loss is not None )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a, _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a ).to(_a )
_a : Optional[int] = model(**_a ,output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_a : Tuple = self.all_model_classes[1]
_a, _a, _a, _a, _a : str = self.model_tester.prepare_config_and_inputs()
_a : Union[str, Any] = model_class(_a )
model.to(_a )
model.train()
_a : str = model(_a ,mask_labels=_a ,class_labels=_a ).loss
loss.backward()
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : List[str] = self.all_model_classes[1]
_a, _a, _a, _a, _a : Tuple = self.model_tester.prepare_config_and_inputs()
_a : Optional[Any] = True
_a : Any = True
_a : Tuple = model_class(_a ).to(_a )
model.train()
_a : List[Any] = model(_a ,mask_labels=_a ,class_labels=_a )
_a : Optional[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_a : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_a : int = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_a : str = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowerCAmelCase = 1e-4
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Tuple ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __lowercase ( self : str ):
'''simple docstring'''
_a : List[str] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a )
_a : str = self.default_image_processor
_a : List[str] = prepare_img()
_a : int = image_processor(_a ,return_tensors='pt' ).to(_a )
_a : List[str] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
_a : Any = model(**_a )
_a : List[Any] = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
_a : Union[str, Any] = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
_a : Optional[Any] = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,_a ,atol=_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : List[str] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
_a : Dict = self.default_image_processor
_a : Any = prepare_img()
_a : Union[str, Any] = image_processor(_a ,return_tensors='pt' ).to(_a )
_a : Union[str, Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
_a : Dict = model(**_a )
# masks_queries_logits
_a : Any = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_a : Union[str, Any] = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
_a : Dict = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_a ,atol=_a ) )
# class_queries_logits
_a : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
_a : Any = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_a ,atol=_a ) )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
_a : Tuple = self.default_image_processor
_a : List[str] = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,
            segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,
            return_tensors='pt' ,)
_a : Tuple = inputs['pixel_values'].to(_a )
_a : Optional[Any] = [el.to(_a ) for el in inputs['mask_labels']]
_a : List[Any] = [el.to(_a ) for el in inputs['class_labels']]
with torch.no_grad():
_a : int = model(**_a )
self.assertTrue(outputs.loss is not None )
| 5 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ,_a : List[str] ,_a : Optional[Any]=13 ,_a : str=30 ,_a : str=2 ,_a : Union[str, Any]=3 ,_a : Optional[Any]=True ,_a : int=True ,_a : Union[str, Any]=32 ,_a : List[Any]=5 ,_a : Union[str, Any]=4 ,_a : int=37 ,_a : Any="gelu" ,_a : Union[str, Any]=0.1 ,_a : str=0.1 ,_a : List[str]=10 ,_a : Dict=0.02 ,_a : Tuple=None ,):
'''simple docstring'''
_a : Any = parent
_a : int = batch_size
_a : List[Any] = image_size
_a : Optional[int] = patch_size
_a : List[str] = num_channels
_a : Dict = is_training
_a : Dict = use_labels
_a : Optional[Any] = hidden_size
_a : str = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Dict = intermediate_size
_a : Union[str, Any] = hidden_act
_a : List[str] = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : List[str] = type_sequence_label_size
_a : int = initializer_range
_a : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a : Union[str, Any] = (image_size // patch_size) ** 2
_a : Tuple = num_patches + 1
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : str = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return ViTMSNConfig(
            image_size=self.image_size ,
            patch_size=self.patch_size ,
            num_channels=self.num_channels ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            initializer_range=self.initializer_range ,)
def __lowercase ( self : Tuple ,_a : Any ,_a : List[Any] ,_a : int ):
'''simple docstring'''
_a : str = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_a : int = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : List[Any] ,_a : str ,_a : Tuple ,_a : Dict ):
'''simple docstring'''
_a : Tuple = self.type_sequence_label_size
_a : int = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = model(_a ,labels=_a )
        print(F"""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
        print(F"""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a : int = 1
_a : Optional[Any] = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = self.prepare_config_and_inputs()
_a, _a, _a : int = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[str] = ViTMSNModelTester(self )
_a : Optional[int] = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_a )
_a : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : List[Any] = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self : int ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(2 )
_a : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_a )
_a : List[str] = self.default_image_processor
_a : int = prepare_img()
_a : Tuple = image_processor(images=_a ,return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[int] = model(**_a )
# verify the logits
_a : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_a )
_a : List[Any] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1E-4 ) )
| 5 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
__lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": 5_1_2,
"""google/realm-cc-news-pretrained-encoder""": 5_1_2,
"""google/realm-cc-news-pretrained-scorer""": 5_1_2,
"""google/realm-cc-news-pretrained-openqa""": 5_1_2,
"""google/realm-orqa-nq-openqa""": 5_1_2,
"""google/realm-orqa-nq-reader""": 5_1_2,
"""google/realm-orqa-wq-openqa""": 5_1_2,
"""google/realm-orqa-wq-reader""": 5_1_2,
}
__lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Dict = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Union[str, Any] = RealmTokenizer
def __init__( self : Optional[int] ,_a : Any=None ,_a : int=None ,_a : List[str]=True ,_a : Optional[int]="[UNK]" ,_a : Tuple="[SEP]" ,_a : Union[str, Any]="[PAD]" ,_a : List[Any]="[CLS]" ,_a : Dict="[MASK]" ,_a : Tuple=True ,_a : Tuple=None ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
_a : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_a ) != do_lower_case
or normalizer_state.get('strip_accents' ,_a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_a ) != tokenize_chinese_chars
):
_a : List[Any] = getattr(_a ,normalizer_state.pop('type' ) )
_a : Any = do_lower_case
_a : int = strip_accents
_a : Tuple = tokenize_chinese_chars
_a : List[Any] = normalizer_class(**_a )
_a : Dict = do_lower_case
def __lowercase ( self : Optional[int] ,_a : int ,**_a : List[Any] ):
'''simple docstring'''
_a : str = PaddingStrategy.MAX_LENGTH
_a : Tuple = text
_a : Tuple = kwargs.pop('text_pair' ,_a )
_a : Tuple = kwargs.pop('return_tensors' ,_a )
_a : Tuple = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(_a ):
if batch_text_pair is not None:
_a : int = batch_text_pair[idx]
else:
_a : Tuple = None
_a : List[Any] = super().__call__(_a ,_a ,return_tensors=_a ,**_a )
_a : List[str] = encoded_candidates.get('input_ids' )
_a : int = encoded_candidates.get('attention_mask' )
_a : Tuple = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(_a )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(_a )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(_a )
_a : List[Any] = {key: item for key, item in output_data.items() if len(_a ) != 0}
return BatchEncoding(_a ,tensor_type=_a )
def __lowercase ( self : Any ,_a : Union[str, Any] ,_a : List[str]=None ):
'''simple docstring'''
_a : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : Any = [self.sep_token_id]
_a : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
_a : Tuple = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
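# Usage sketch (illustrative; follows the upstream batch-encode-candidates
# contract): calling the tokenizer on a batch of candidate lists pads every
# candidate to `max_length`, so two candidates per example with
# max_length=10 yield input_ids of shape (batch_size, 2, 10).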
| 5 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ (__a : str = "https://www.worldometers.info/coronavirus" ):
"""simple docstring"""
_a : List[str] = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
_a : Dict = soup.findAll('h1' )
_a : Union[str, Any] = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(__a , __a )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : torch.FloatTensor
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Tuple ,_a : int = 3 ,_a : int = 3 ,_a : Tuple[str] = ("DownEncoderBlock2D",) ,_a : Tuple[str] = ("UpDecoderBlock2D",) ,_a : Tuple[int] = (64,) ,_a : int = 1 ,_a : str = "silu" ,_a : int = 3 ,_a : int = 32 ,_a : int = 256 ,_a : int = 32 ,_a : Optional[int] = None ,_a : float = 0.1_8215 ,_a : str = "group" ,):
'''simple docstring'''
super().__init__()
# pass init params to Encoder
_a : int = Encoder(
in_channels=_a ,out_channels=_a ,down_block_types=_a ,block_out_channels=_a ,layers_per_block=_a ,act_fn=_a ,norm_num_groups=_a ,double_z=_a ,)
_a : Tuple = vq_embed_dim if vq_embed_dim is not None else latent_channels
_a : Optional[Any] = nn.Convad(_a ,_a ,1 )
_a : Tuple = VectorQuantizer(_a ,_a ,beta=0.25 ,remap=_a ,sane_index_shape=_a )
_a : int = nn.Convad(_a ,_a ,1 )
# pass init params to Decoder
_a : Dict = Decoder(
in_channels=_a ,out_channels=_a ,up_block_types=_a ,block_out_channels=_a ,layers_per_block=_a ,act_fn=_a ,norm_num_groups=_a ,norm_type=_a ,)
@apply_forward_hook
def __lowercase ( self : Union[str, Any] ,_a : torch.FloatTensor ,_a : bool = True ):
'''simple docstring'''
_a : Tuple = self.encoder(_a )
_a : Optional[int] = self.quant_conv(_a )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_a )
@apply_forward_hook
def __lowercase ( self : Optional[int] ,_a : torch.FloatTensor ,_a : bool = False ,_a : bool = True ):
'''simple docstring'''
if not force_not_quantize:
_a, _a, _a : List[str] = self.quantize(_a )
else:
_a : str = h
_a : Union[str, Any] = self.post_quant_conv(_a )
_a : List[Any] = self.decoder(_a ,quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_a )
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : bool = True ):
'''simple docstring'''
_a : str = sample
_a : Dict = self.encode(_a ).latents
_a : Tuple = self.decode(_a ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_a )
| 5 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase = """docs/source/en/_toctree.yml"""
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Any = defaultdict(__a )
for doc in model_doc:
counts[doc["local"]] += 1
_a : List[str] = [key for key, value in counts.items() if value > 1]
_a : str = []
for duplicate_key in duplicates:
_a : Union[str, Any] = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
        if len(_a ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
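# Behavior sketch (illustrative): given
#   [{"local": "bert", "title": "BERT"}, {"local": "bert", "title": "BERT"},
#    {"local": "albert", "title": "ALBERT"}]
# the cleaner keeps a single entry per duplicated `local` key and returns the
# entries sorted by lowercased title: ALBERT first, then BERT.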
def UpperCAmelCase_ (__a : Optional[int]=False ):
"""simple docstring"""
with open(__a , encoding='utf-8' ) as f:
_a : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
_a : Union[str, Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_a : Union[str, Any] = content[api_idx]['sections']
# Then to the model doc
_a : List[str] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
_a : List[str] = api_doc[model_idx]['sections']
_a : List[Any] = [(idx, section) for idx, section in enumerate(__a ) if 'sections' in section]
_a : Tuple = False
for idx, modality_doc in modalities_docs:
_a : List[Any] = modality_doc['sections']
_a : Any = clean_model_doc_toc(__a )
if old_modality_doc != new_modality_doc:
_a : Union[str, Any] = True
if overwrite:
_a : str = new_modality_doc
if diff:
if overwrite:
_a : Dict = model_doc
_a : Dict = api_doc
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__lowerCAmelCase = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 5 | 1 |
'''simple docstring'''
import copy
import re
class UpperCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = '''hp'''
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Union[str, Any] = None
@classmethod
def __lowercase ( cls : str ,_a : Optional[int] ,_a : Dict ):
'''simple docstring'''
_a : Optional[Any] = prefix
_a : Union[str, Any] = defaults
cls.build_naming_info()
@staticmethod
def __lowercase ( _a : List[str] ,_a : int ):
'''simple docstring'''
if len(_a ) == 0:
return ""
_a : List[Any] = None
if any(char.isdigit() for char in word ):
raise Exception(F"""Parameters should not contain numbers: '{word}' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 ,len(_a ) + 1 ):
_a : Any = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
_a : int = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(_a : Optional[Any] ):
_a : Optional[Any] = ''
while integer != 0:
_a : Union[str, Any] = chr(ord('A' ) + integer % 10 ) + s
integer //= 10
return s
_a : Any = 0
while True:
_a : Any = word + '#' + int_to_alphabetic(_a )
if sword in info["reverse_short_word"]:
continue
else:
_a : Tuple = sword
break
_a : Union[str, Any] = short_word
_a : List[Any] = word
return short_word
@staticmethod
def __lowercase ( _a : int ,_a : Dict ):
'''simple docstring'''
_a : Optional[int] = param_name.split('_' )
_a : Union[str, Any] = [TrialShortNamer.shortname_for_word(_a ,_a ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
_a : Optional[Any] = ['', '_']
for separator in separators:
_a : Optional[Any] = separator.join(_a )
if shortname not in info["reverse_short_param"]:
_a : Tuple = shortname
_a : List[Any] = param_name
return shortname
return param_name
@staticmethod
def __lowercase ( _a : Optional[Any] ,_a : Any ):
'''simple docstring'''
_a : Dict = TrialShortNamer.shortname_for_key(_a ,_a )
_a : Optional[int] = short_name
_a : Optional[int] = param_name
@classmethod
def __lowercase ( cls : Any ):
'''simple docstring'''
if cls.NAMING_INFO is not None:
return
_a : Optional[int] = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
_a : str = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(_a ,_a )
_a : Dict = info
@classmethod
def __lowercase ( cls : List[str] ,_a : Tuple ):
'''simple docstring'''
cls.build_naming_info()
assert cls.PREFIX is not None
_a : Tuple = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
_a : str = cls.NAMING_INFO['short_param'][k]
if isinstance(_a ,_a ):
_a : int = 1 if v else 0
_a : str = '' if isinstance(_a ,(int, float) ) else '-'
_a : List[str] = F"""{key}{sep}{v}"""
name.append(_a )
return "_".join(_a )
@classmethod
def __lowercase ( cls : List[str] ,_a : str ):
'''simple docstring'''
_a : int = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
_a : Union[str, Any] = []
else:
_a : Optional[Any] = repr.split('_' )
_a : Any = {}
for value in values:
if "-" in value:
_a, _a : Tuple = value.split('-' )
else:
_a : List[str] = re.sub('[0-9.]' ,'' ,_a )
_a : List[str] = float(re.sub('[^0-9.]' ,'' ,_a ) )
_a : int = cls.NAMING_INFO['reverse_short_param'][p_k]
_a : List[str] = p_v
for k in cls.DEFAULTS:
if k not in parameters:
_a : Optional[int] = cls.DEFAULTS[k]
return parameters
| 5 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) != 3_2:
raise ValueError('Input must be of length 32' )
_a : Any = b''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '08x' )[-8:]
_a : str = b''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : List[Any] = b''
for char in message:
bit_string += format(__a , '08b' ).encode('utf-8' )
_a : int = format(len(__a ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__a ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) % 5_1_2 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(__a ) , 5_1_2 ):
_a : List[Any] = bit_string[pos : pos + 5_1_2]
_a : str = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '032b' )
_a : int = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__a , 2 )
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
return (a + b) % 2**3_2
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
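# Sanity values (illustrative): left-rotating 1 by 1 in 32-bit space gives 2,
# and left-rotating 2**31 by 1 wraps around to 1.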
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : str = preprocess(__a )
_a : Optional[int] = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
_a : int = 0x67_45_23_01
_a : Union[str, Any] = 0xEF_CD_AB_89
_a : str = 0x98_BA_DC_FE
_a : List[Any] = 0x10_32_54_76
_a : Optional[int] = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__a ):
_a : Union[str, Any] = aa
_a : List[Any] = ba
_a : List[Any] = ca
_a : Dict = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a : Optional[int] = d ^ (b & (c ^ d))
_a : Optional[Any] = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a : Optional[Any] = c ^ (d & (b ^ c))
_a : Dict = (5 * i + 1) % 1_6
elif i <= 4_7:
_a : Optional[Any] = b ^ c ^ d
_a : Dict = (3 * i + 5) % 1_6
else:
_a : int = c ^ (b | not_aa(__a ))
_a : List[str] = (7 * i) % 1_6
_a : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**3_2
_a : Union[str, Any] = d
_a : Tuple = c
_a : Optional[int] = b
_a : Union[str, Any] = sum_aa(__a , left_rotate_aa(__a , shift_amounts[i] ) )
# Add hashed chunk to running total
_a : Any = sum_aa(__a , __a )
_a : Dict = sum_aa(__a , __a )
_a : Union[str, Any] = sum_aa(__a , __a )
_a : str = sum_aa(__a , __a )
_a : Optional[Any] = reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
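# Added sanity check: a reimplementation of MD5 can be validated against the
# standard library; the literal below is the well-known digest of b'hello'.
import hashlib
assert hashlib.md5(b'hello').hexdigest() == '5d41402abc4b2a76b9719d911017c592'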
| 5 | 1 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
__lowerCAmelCase = 1_0
def UpperCAmelCase_ (__a : int , __a : int , __a : list[int] , __a : int ):
"""simple docstring"""
for i in range(__a , __a ):
if array[i] == target:
return i
return -1
def UpperCAmelCase_ (__a : list[int] , __a : int ):
"""simple docstring"""
_a : Union[str, Any] = 0
_a : List[Any] = len(__a )
while left <= right:
if right - left < precision:
return lin_search(__a , __a , __a , __a )
_a : Tuple = (left + right) // 3 + 1
_a : str = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
_a : Optional[int] = one_third - 1
elif array[two_third] < target:
_a : Tuple = two_third + 1
else:
_a : Optional[Any] = one_third + 1
_a : Any = two_third - 1
else:
return -1
def UpperCAmelCase_ (__a : int , __a : int , __a : list[int] , __a : int ):
"""simple docstring"""
if left < right:
if right - left < precision:
return lin_search(__a , __a , __a , __a )
_a : List[str] = (left + right) // 3 + 1
_a : int = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(__a , one_third - 1 , __a , __a )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , __a , __a , __a )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , __a , __a )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase = input("""Enter numbers separated by comma:\n""").strip()
__lowerCAmelCase = [int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
__lowerCAmelCase = int(input("""Enter the number to be found in the list:\n""").strip())
__lowerCAmelCase = ite_ternary_search(collection, target)
__lowerCAmelCase = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
print(f'''Recursive search: {target} found at positions: {resulta}''')
else:
print("""Not found""")
| 5 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCAmelCase_ (__a : str , __a : Dict=0.999 , __a : List[str]="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__a : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__a : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_a : Tuple = []
for i in range(__a ):
_a : Union[str, Any] = i / num_diffusion_timesteps
_a : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__a ) / alpha_bar_fn(__a ) , __a ) )
return torch.tensor(__a , dtype=torch.floataa )
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Dict = 2
@register_to_config
def __init__( self : str ,_a : int = 1000 ,_a : float = 0.0_0085 ,_a : float = 0.012 ,_a : str = "linear" ,_a : Optional[Union[np.ndarray, List[float]]] = None ,_a : str = "epsilon" ,_a : Optional[bool] = False ,_a : Optional[bool] = False ,_a : float = 1.0 ,_a : str = "linspace" ,_a : int = 0 ,):
'''simple docstring'''
if trained_betas is not None:
_a : List[str] = torch.tensor(_a ,dtype=torch.floataa )
elif beta_schedule == "linear":
_a : Tuple = torch.linspace(_a ,_a ,_a ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a : List[str] = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_a ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a : Dict = betas_for_alpha_bar(_a ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
_a : Tuple = betas_for_alpha_bar(_a ,alpha_transform_type='exp' )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
_a : Optional[Any] = 1.0 - self.betas
_a : Optional[int] = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(_a ,_a ,_a )
_a : Optional[int] = use_karras_sigmas
def __lowercase ( self : Any ,_a : Union[str, Any] ,_a : Optional[Any]=None ):
'''simple docstring'''
if schedule_timesteps is None:
_a : List[Any] = self.timesteps
_a : Dict = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a : int = 1 if len(_a ) > 1 else 0
else:
_a : str = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
_a : str = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Union[float, torch.FloatTensor] ,):
'''simple docstring'''
_a : List[Any] = self.index_for_timestep(_a )
_a : Tuple = self.sigmas[step_index]
_a : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowercase ( self : Any ,_a : int ,_a : Union[str, torch.device] = None ,_a : Optional[int] = None ,):
'''simple docstring'''
_a : Optional[Any] = num_inference_steps
_a : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a : Optional[Any] = np.linspace(0 ,num_train_timesteps - 1 ,_a ,dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : int = (np.arange(0 ,_a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a : Any = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : Union[str, Any] = (np.arange(_a ,0 ,-step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_a : Tuple = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a : Union[str, Any] = np.log(_a )
_a : str = np.interp(_a ,np.arange(0 ,len(_a ) ) ,_a )
if self.config.use_karras_sigmas:
_a : List[Any] = self._convert_to_karras(in_sigmas=_a ,num_inference_steps=self.num_inference_steps )
_a : Dict = np.array([self._sigma_to_t(_a ,_a ) for sigma in sigmas] )
_a : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a : Union[str, Any] = torch.from_numpy(_a ).to(device=_a )
_a : Any = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_a : List[Any] = torch.from_numpy(_a )
_a : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_a ).startswith('mps' ):
# mps does not support float64
_a : Tuple = timesteps.to(_a ,dtype=torch.floataa )
else:
_a : Dict = timesteps.to(device=_a )
# empty dt and derivative
_a : Tuple = None
_a : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a : Union[str, Any] = defaultdict(_a )
def __lowercase ( self : str ,_a : Dict ,_a : Dict ):
'''simple docstring'''
_a : Optional[int] = np.log(_a )
# get distribution
_a : Union[str, Any] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_a : List[Any] = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_a : Tuple = low_idx + 1
_a : Union[str, Any] = log_sigmas[low_idx]
_a : Optional[Any] = log_sigmas[high_idx]
# interpolate sigmas
_a : Optional[Any] = (low - log_sigma) / (low - high)
_a : List[str] = np.clip(_a ,0 ,1 )
# transform interpolation to time range
_a : Union[str, Any] = (1 - w) * low_idx + w * high_idx
_a : List[str] = t.reshape(sigma.shape )
return t
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Tuple ):
'''simple docstring'''
_a : float = in_sigmas[-1].item()
_a : float = in_sigmas[0].item()
_a : Tuple = 7.0 # 7.0 is the value used in the paper
_a : str = np.linspace(0 ,1 ,_a )
_a : Optional[Any] = sigma_min ** (1 / rho)
_a : Union[str, Any] = sigma_max ** (1 / rho)
_a : str = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return self.dt is None
def __lowercase ( self : int ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : Union[float, torch.FloatTensor] ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : bool = True ,):
'''simple docstring'''
_a : Union[str, Any] = self.index_for_timestep(_a )
# advance index counter by 1
_a : Any = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a : Tuple = self.sigmas[step_index]
_a : int = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_a : List[str] = self.sigmas[step_index - 1]
_a : List[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a : Optional[int] = 0
_a : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a : Dict = sigma_hat if self.state_in_first_order else sigma_next
_a : Optional[int] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a : List[Any] = sigma_hat if self.state_in_first_order else sigma_next
_a : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_a : Union[str, Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
_a : Optional[int] = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a : Optional[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a : Any = sigma_next - sigma_hat
# store for 2nd order step
_a : int = derivative
_a : List[str] = dt
_a : Union[str, Any] = sample
else:
# 2. 2nd order / Heun's method
_a : Dict = (sample - pred_original_sample) / sigma_next
_a : Tuple = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_a : Optional[Any] = self.dt
_a : Union[str, Any] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_a : List[Any] = None
_a : Union[str, Any] = None
_a : Dict = None
_a : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __lowercase ( self : Optional[int] ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,):
'''simple docstring'''
_a : str = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
_a : Dict = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_a : Optional[Any] = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_a : int = self.timesteps.to(original_samples.device )
_a : Optional[Any] = timesteps.to(original_samples.device )
_a : Any = [self.index_for_timestep(_a ,_a ) for t in timesteps]
_a : Optional[int] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a : Optional[Any] = sigma.unsqueeze(-1 )
_a : Any = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
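# Added sketch: this sample mirrors diffusers' HeunDiscreteScheduler. Assuming
# the upstream class matches the code above, a minimal smoke test looks like:
try:
    from diffusers import HeunDiscreteScheduler
    _sched = HeunDiscreteScheduler()
    _sched.set_timesteps(25)
    # interior timesteps are interleaved twice for Heun's two model
    # evaluations per step: 1 + 2 * (25 - 1) == 49
    assert len(_sched.timesteps) == 2 * 25 - 1
except ImportError:
    pass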
| 5 | 1 |
'''simple docstring'''
# A bipartite graph is a graph whose vertices can be divided into two independent
# sets, U and V, such that every edge (u, v) connects a vertex in U to a vertex
# in V: for every edge (u, v), either u belongs to U and v to V, or u belongs
# to V and v to U. Equivalently, no edge connects two vertices of the same set.
def UpperCAmelCase_ (__a : Optional[Any] ):
"""simple docstring"""
_a : str = [False] * len(__a )
_a : Optional[int] = [-1] * len(__a )
def dfs(__a : Union[str, Any] , __a : str ):
_a : List[str] = True
_a : str = c
for u in graph[v]:
if not visited[u]:
dfs(__a , 1 - c )
for i in range(len(__a ) ):
if not visited[i]:
dfs(__a , 0 )
for i in range(len(__a ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
__lowerCAmelCase = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
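# Added sanity check (assumes the intended, de-obfuscated semantics): the DFS
# 2-coloring fails exactly on odd cycles, so a triangle is non-bipartite.
assert check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}) is False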
| 5 |
'''simple docstring'''
import qiskit
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
_a : Any = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_a : List[Any] = qiskit.QuantumCircuit(__a , __a )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
_a : Tuple = qiskit.execute(__a , __a , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(__a )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
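# Added sanity check (assumes the intended, de-obfuscated semantics; reuses
# the name from the __main__ line above): with no gates before measurement
# the qubit stays in |0>, so all 1_0_0_0 shots land in the '0' bin.
_counts = single_qubit_measure(1, 1)
assert _counts == {'0': 1_0_0_0}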
| 5 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__lowerCAmelCase = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__lowerCAmelCase = TaTokenizerFast
__lowerCAmelCase = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__lowerCAmelCase = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
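# Added note: the _LazyModule above defers the framework-specific imports
# until first attribute access, so importing this package stays cheap even
# when torch, tensorflow and flax are all installed.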
| 5 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_a : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_a : List[str] = 'xvjiarui/stable-diffusion-2-inpainting'
_a, _a : str = FlaxStableDiffusionInpaintPipeline.from_pretrained(_a ,safety_checker=_a )
_a : str = 'Face of a yellow cat, high resolution, sitting on a park bench'
_a : int = jax.random.PRNGKey(0 )
_a : Tuple = 50
_a : Any = jax.device_count()
_a : Dict = num_samples * [prompt]
_a : Optional[Any] = num_samples * [init_image]
_a : str = num_samples * [mask_image]
_a, _a, _a : Optional[Any] = pipeline.prepare_inputs(_a ,_a ,_a )
# shard inputs and rng
_a : Optional[Any] = replicate(_a )
_a : str = jax.random.split(_a ,jax.device_count() )
_a : Dict = shard(_a )
_a : int = shard(_a )
_a : int = shard(_a )
_a : Union[str, Any] = pipeline(
_a ,_a ,_a ,_a ,_a ,_a ,jit=_a )
_a : Union[str, Any] = output.images.reshape(_a ,512 ,512 ,3 )
_a : Union[str, Any] = images[0, 253:256, 253:256, -1]
_a : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_a : Union[str, Any] = jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
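# Added note: with jit=True the Flax pipeline runs a pmapped forward pass,
# which is why the params are replicate()d and the prompt ids, images, masks
# and RNG keys are shard()ed across jax.device_count() devices beforehand.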
| 5 | 1 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase_ (__a : Optional[Any] , __a : Tuple=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def UpperCAmelCase_ (__a : Optional[int] , __a : Union[str, Any]=0 ):
"""simple docstring"""
_a : Dict = []
for old_item in old_list:
_a : List[str] = old_item.replace('in_layers.0' , 'norm1' )
_a : Any = new_item.replace('in_layers.2' , 'conv1' )
_a : Dict = new_item.replace('out_layers.0' , 'norm2' )
_a : str = new_item.replace('out_layers.3' , 'conv2' )
_a : Tuple = new_item.replace('emb_layers.1' , 'time_emb_proj' )
_a : Dict = new_item.replace('skip_connection' , 'conv_shortcut' )
_a : List[Any] = shave_segments(__a , n_shave_prefix_segments=__a )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase_ (__a : List[str] , __a : Tuple=0 ):
"""simple docstring"""
_a : int = []
for old_item in old_list:
_a : List[str] = old_item
_a : Union[str, Any] = new_item.replace('norm.weight' , 'group_norm.weight' )
_a : Tuple = new_item.replace('norm.bias' , 'group_norm.bias' )
_a : Dict = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
_a : Dict = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
_a : Optional[Any] = shave_segments(__a , n_shave_prefix_segments=__a )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase_ (__a : Optional[Any] , __a : str , __a : Tuple , __a : Union[str, Any]=None , __a : Any=None , __a : Optional[Any]=None ):
"""simple docstring"""
assert isinstance(__a , __a ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_a : str = old_checkpoint[path]
_a : List[Any] = old_tensor.shape[0] // 3
_a : Union[str, Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_a : List[Any] = old_tensor.shape[0] // config['num_head_channels'] // 3
_a : str = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_a, _a, _a : Any = old_tensor.split(channels // num_heads , dim=1 )
_a : Dict = query.reshape(__a )
_a : Tuple = key.reshape(__a )
_a : List[str] = value.reshape(__a )
for path in paths:
_a : List[str] = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_a : int = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
_a : Optional[Any] = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
_a : Tuple = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
_a : Optional[Any] = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_a : Dict = old_checkpoint[path['old']][:, :, 0]
else:
_a : int = old_checkpoint[path['old']]
def UpperCAmelCase_ (__a : List[str] , __a : str ):
"""simple docstring"""
_a : Tuple = {}
_a : Any = checkpoint['time_embed.0.weight']
_a : Dict = checkpoint['time_embed.0.bias']
_a : Dict = checkpoint['time_embed.2.weight']
_a : List[Any] = checkpoint['time_embed.2.bias']
_a : Union[str, Any] = checkpoint['input_blocks.0.0.weight']
_a : Tuple = checkpoint['input_blocks.0.0.bias']
_a : int = checkpoint['out.0.weight']
_a : List[str] = checkpoint['out.0.bias']
_a : List[str] = checkpoint['out.2.weight']
_a : str = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
_a : int = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
_a : Optional[Any] = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(__a )
}
# Retrieves the keys for the middle blocks only
_a : Dict = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
_a : Union[str, Any] = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(__a )
}
# Retrieves the keys for the output blocks only
_a : List[str] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
_a : Optional[Any] = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(__a )
}
for i in range(1 , __a ):
_a : Union[str, Any] = (i - 1) // (config['num_res_blocks'] + 1)
_a : int = (i - 1) % (config['num_res_blocks'] + 1)
_a : List[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
_a : Union[str, Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
_a : List[Any] = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
_a : Optional[Any] = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
_a : Dict = renew_resnet_paths(__a )
_a : List[Any] = {'old': f"""input_blocks.{i}.0""", 'new': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
_a : Optional[int] = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
__a , __a , __a , additional_replacements=[meta_path, resnet_op] , config=__a )
if len(__a ):
_a : List[str] = renew_attention_paths(__a )
_a : Optional[Any] = {
'old': f"""input_blocks.{i}.1""",
'new': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : int = {
f"""input_blocks.{i}.1.qkv.bias""": {
'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
__a , __a , __a , additional_replacements=[meta_path] , attention_paths_to_split=__a , config=__a , )
_a : List[str] = middle_blocks[0]
_a : Any = middle_blocks[1]
_a : Tuple = middle_blocks[2]
_a : str = renew_resnet_paths(__a )
assign_to_checkpoint(__a , __a , __a , config=__a )
_a : Union[str, Any] = renew_resnet_paths(__a )
assign_to_checkpoint(__a , __a , __a , config=__a )
_a : Tuple = renew_attention_paths(__a )
_a : Optional[Any] = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
__a , __a , __a , attention_paths_to_split=__a , config=__a )
for i in range(__a ):
_a : Any = i // (config['num_res_blocks'] + 1)
_a : List[str] = i % (config['num_res_blocks'] + 1)
_a : Optional[Any] = [shave_segments(__a , 2 ) for name in output_blocks[i]]
_a : Tuple = {}
for layer in output_block_layers:
_a, _a : int = layer.split('.' )[0], shave_segments(__a , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__a )
else:
_a : Optional[int] = [layer_name]
if len(__a ) > 1:
_a : Any = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
_a : Dict = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
_a : Optional[Any] = renew_resnet_paths(__a )
_a : Union[str, Any] = renew_resnet_paths(__a )
_a : List[Any] = {'old': f"""output_blocks.{i}.0""", 'new': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_a : Dict = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
_a : Tuple = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
_a : List[Any] = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(__a ) == 2:
_a : Tuple = []
if len(__a ):
_a : Optional[int] = renew_attention_paths(__a )
_a : Dict = {
'old': f"""output_blocks.{i}.1""",
'new': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : str = {
f"""output_blocks.{i}.1.qkv.bias""": {
'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
__a , __a , __a , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=__a , )
else:
_a : Dict = renew_resnet_paths(__a , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_a : List[Any] = '.'.join(['output_blocks', str(__a ), path['old']] )
_a : Tuple = '.'.join(['up_blocks', str(__a ), 'resnets', str(__a ), path['new']] )
_a : Any = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__lowerCAmelCase = json.loads(f.read())
__lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__lowerCAmelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__lowerCAmelCase = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
__lowerCAmelCase = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
__lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
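# Added sketch: a typical invocation of this conversion script (the script
# name and all paths below are hypothetical placeholders):
#
#   python convert_ldm_checkpoint.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted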
| 5 |
'''simple docstring'''
def UpperCAmelCase_ (__a : str , __a : str ):
"""simple docstring"""
_a : int = len(__a ) + 1
_a : List[str] = len(__a ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_a : Optional[int] = [[0 for i in range(__a )] for j in range(__a )]
# a zero-length string matches a zero-length pattern
_a : str = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , __a ):
_a : Optional[Any] = 0
# a zero-length string can still match a pattern in which every element
# carries '*' (each "x*" may match the empty string)
for j in range(1 , __a ):
_a : Dict = dp[0][j - 2] if pattern[j - 1] == '*' else 0
# now use a bottom-up approach to fill the table for all remaining lengths
for i in range(1 , __a ):
for j in range(1 , __a ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_a : Tuple = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
_a : List[str] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_a : int = dp[i - 1][j]
else:
_a : Any = 0
else:
_a : Optional[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__lowerCAmelCase = """aab"""
__lowerCAmelCase = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
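# Added sanity checks (assumes the intended, de-obfuscated semantics) using
# the classic examples for this DP:
assert match_pattern('aab', 'c*a*b') is True  # "c*" -> '', "a*" -> "aa"
assert match_pattern('mississippi', 'mis*is*p*.') is False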
| 5 | 1 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ('foo.json',)] )
def __lowercase ( self : Union[str, Any] ,_a : Optional[Any] ):
'''simple docstring'''
_a : List[str] = GenerationConfig(
do_sample=_a ,temperature=0.7 ,length_penalty=1.0 ,bad_words_ids=[[1, 2, 3], [4, 5]] ,)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a ,config_name=_a )
_a : Any = GenerationConfig.from_pretrained(_a ,config_name=_a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample ,_a )
self.assertEqual(loaded_config.temperature ,0.7 )
self.assertEqual(loaded_config.length_penalty ,1.0 )
self.assertEqual(loaded_config.bad_words_ids ,[[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k ,50 )
self.assertEqual(loaded_config.max_length ,20 )
self.assertEqual(loaded_config.max_time ,_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Union[str, Any] = AutoConfig.from_pretrained('gpt2' )
_a : Any = GenerationConfig.from_model_config(_a )
_a : List[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(_a ,_a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id ,default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id ,model_config.eos_token_id )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Any = GenerationConfig()
_a : List[str] = {
'max_new_tokens': 1024,
'foo': 'bar',
}
_a : int = copy.deepcopy(_a )
_a : str = generation_config.update(**_a )
# update_kwargs was not modified (no side effects)
self.assertEqual(_a ,_a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens ,1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(_a ,{'foo': 'bar'} )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Any = GenerationConfig()
_a : List[str] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(_a )
_a : Optional[int] = GenerationConfig.from_pretrained(_a )
# the ad-hoc attribute set before saving survives the save/load round-trip
self.assertEqual(new_config.foo ,'bar' )
_a : Dict = GenerationConfig.from_model_config(_a )
assert not hasattr(_a ,'foo' ) # no new kwargs should be initialized if from config
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[Any] = GenerationConfig()
self.assertEqual(default_config.temperature ,1.0 )
self.assertEqual(default_config.do_sample ,_a )
self.assertEqual(default_config.num_beams ,1 )
_a : Tuple = GenerationConfig(
do_sample=_a ,temperature=0.7 ,length_penalty=1.0 ,bad_words_ids=[[1, 2, 3], [4, 5]] ,)
self.assertEqual(config.temperature ,0.7 )
self.assertEqual(config.do_sample ,_a )
self.assertEqual(config.num_beams ,1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a )
_a : Optional[Any] = GenerationConfig.from_pretrained(_a ,temperature=1.0 )
self.assertEqual(loaded_config.temperature ,1.0 )
self.assertEqual(loaded_config.do_sample ,_a )
self.assertEqual(loaded_config.num_beams ,1 ) # default value
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Tuple ):
'''simple docstring'''
_a : int = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls : Any ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Optional[int] = GenerationConfig(
do_sample=_a ,temperature=0.7 ,length_penalty=1.0 ,)
config.push_to_hub('test-generation-config' ,use_auth_token=self._token )
_a : Any = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='test-generation-config' ,push_to_hub=_a ,use_auth_token=self._token )
_a : int = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = GenerationConfig(
do_sample=_a ,temperature=0.7 ,length_penalty=1.0 ,)
config.push_to_hub('valid_org/test-generation-config-org' ,use_auth_token=self._token )
_a : str = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='valid_org/test-generation-config-org' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Optional[int] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
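# Added note: as the tests above exercise, GenerationConfig.update() applies
# known attributes in place and hands back the unused kwargs as a dict, so
# callers can detect typos or unsupported options without an exception.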
| 5 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = BlenderbotSmallTokenizer
__UpperCAmelCase : Tuple = False
def __lowercase ( self : List[Any] ):
'''simple docstring'''
super().setUp()
_a : List[str] = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
_a : Tuple = dict(zip(_a ,range(len(_a ) ) ) )
_a : List[Any] = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
_a : List[Any] = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
_a : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_a : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(_a ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(_a ) )
def __lowercase ( self : List[Any] ,**_a : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : Tuple ,_a : int ):
'''simple docstring'''
_a : Optional[Any] = 'adapt act apte'
_a : Dict = 'adapt act apte'
return input_text, output_text
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
_a : Union[str, Any] = 'adapt act apte'
_a : Dict = ['adapt', 'act', 'ap@@', 'te']
_a : Tuple = tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
_a : List[str] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_a : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : str = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
_a : Union[str, Any] = 'I am a small frog.'
_a : int = tok([src_text] ,padding=_a ,truncation=_a )['input_ids']
_a : str = tok.batch_decode(_a ,skip_special_tokens=_a ,clean_up_tokenization_spaces=_a )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
_a : Union[str, Any] = 'I am a small frog .'
_a : Optional[Any] = '.'
_a : Optional[Any] = tok(_a )['input_ids']
_a : Union[str, Any] = tok(_a )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 5 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class UpperCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = XGLMConfig
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : List[str] = '''gelu'''
def __init__( self : Optional[Any] ,_a : Tuple ,_a : Any=14 ,_a : str=7 ,_a : List[Any]=True ,_a : Dict=True ,_a : Optional[int]=True ,_a : Dict=99 ,_a : str=32 ,_a : List[str]=2 ,_a : List[str]=4 ,_a : str=37 ,_a : Tuple="gelu" ,_a : Any=0.1 ,_a : List[Any]=0.1 ,_a : Optional[Any]=512 ,_a : Optional[int]=0.02 ,):
'''simple docstring'''
_a : Tuple = parent
_a : Union[str, Any] = batch_size
_a : Optional[Any] = seq_length
_a : str = is_training
_a : List[Any] = use_input_mask
_a : Dict = use_labels
_a : List[str] = vocab_size
_a : Optional[int] = d_model
_a : Union[str, Any] = num_hidden_layers
_a : Any = num_attention_heads
_a : List[str] = ffn_dim
_a : str = activation_function
_a : Any = activation_dropout
_a : Dict = attention_dropout
_a : Optional[int] = max_position_embeddings
_a : List[Any] = initializer_range
_a : Union[str, Any] = None
_a : List[Any] = 0
_a : Optional[int] = 2
_a : List[Any] = 1
def __lowercase ( self : Any ):
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[Any] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) ,clip_value_min=0 ,clip_value_max=3 )
_a : List[Any] = None
if self.use_input_mask:
_a : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_a : Dict = self.get_config()
_a : Any = floats_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __lowercase ( self : Tuple ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,num_layers=self.num_hidden_layers ,attention_heads=self.num_attention_heads ,ffn_dim=self.ffn_dim ,activation_function=self.activation_function ,activation_dropout=self.activation_dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,use_cache=_a ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,return_dict=_a ,)
def __lowercase ( self : Any ):
'''simple docstring'''
_a : int = self.prepare_config_and_inputs()
(
(
_a
), (
_a
), (
_a
), (
_a
),
) : Tuple = config_and_inputs
_a : Tuple = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__UpperCAmelCase : Optional[int] = (TFXGLMForCausalLM,) if is_tf_available() else ()
__UpperCAmelCase : Optional[int] = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
__UpperCAmelCase : Any = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Optional[int] = False
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : int = TFXGLMModelTester(self )
_a : List[str] = ConfigTester(self ,config_class=_a ,n_embd=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : int = TFXGLMModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self : Optional[Any] ,_a : Union[str, Any]=True ):
'''simple docstring'''
_a : Union[str, Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
_a : str = tf.convert_to_tensor([[2, 268, 9865]] ,dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_a : str = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
_a : List[str] = model.generate(_a ,do_sample=_a ,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() ,_a )
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Union[str, Any] = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
_a : Optional[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
_a : int = tokenizer('Today is a nice day and' ,return_tensors='tf' )
_a : Optional[int] = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(':/CPU:0' ):
_a : Optional[int] = model.generate(_a ,do_sample=_a ,seed=[7, 0] )
_a : Any = tokenizer.decode(output_ids[0] ,skip_special_tokens=_a )
_a : List[str] = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(_a ,_a )
@slow
def __lowercase ( self : str ):
'''simple docstring'''
_a : int = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
_a : Union[str, Any] = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
_a : Any = 'left'
# use different length sentences to test batching
_a : Union[str, Any] = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
_a : Union[str, Any] = tokenizer(_a ,return_tensors='tf' ,padding=_a )
_a : Dict = inputs['input_ids']
_a : Dict = model.generate(input_ids=_a ,attention_mask=inputs['attention_mask'] ,max_new_tokens=12 )
_a : List[str] = tokenizer(sentences[0] ,return_tensors='tf' ).input_ids
_a : List[str] = model.generate(input_ids=_a ,max_new_tokens=12 )
_a : Optional[Any] = tokenizer(sentences[1] ,return_tensors='tf' ).input_ids
_a : Union[str, Any] = model.generate(input_ids=_a ,max_new_tokens=12 )
_a : Dict = tokenizer.batch_decode(_a ,skip_special_tokens=_a )
_a : Union[str, Any] = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=_a )
_a : Dict = tokenizer.decode(output_padded[0] ,skip_special_tokens=_a )
_a : Any = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(_a ,_a )
self.assertListEqual(_a ,[non_padded_sentence, padded_sentence] )
| 5 |
'''simple docstring'''
# fmt: off
__lowerCAmelCase = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
__lowerCAmelCase = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = 'Morse code here!'
print(__a )
_a : Tuple = encrypt(__a )
print(__a )
_a : str = decrypt(__a )
print(__a )
if __name__ == "__main__":
main()
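# Added sanity check (assumes the intended, de-obfuscated semantics): the
# canonical distress call round-trips through both directions of the table.
assert encrypt('SOS') == '... --- ...'
assert decrypt('... --- ...') == 'SOS'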
| 5 | 1 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase_ (__a : Union[str, Any] , __a : Optional[int] , __a : List[str]=None ):
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
_a : List[Any] = nn.Parameter(__a )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
_a : Tuple = nn.Parameter(__a )
def UpperCAmelCase_ (__a : str , __a : Optional[int] , __a : Any ):
"""simple docstring"""
_a : int = np.asarray(weights[0] )
_a : Optional[Any] = np.asarray(weights[1] )
_a : Tuple = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.output.dense , torch.tensor(__a ).view(-1 , __a ).contiguous().transpose(0 , 1 ) , )
def UpperCAmelCase_ (__a : int , __a : List[Any] , __a : List[str] ):
"""simple docstring"""
_a : List[Any] = np.asarray(weights[0] )
_a : Optional[Any] = np.asarray(weights[1] )
_a : List[str] = np.asarray(weights[2] )
_a : List[Any] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.output.dense , torch.tensor(__a ).view(-1 , __a ).contiguous().transpose(0 , 1 ) , )
def UpperCAmelCase_ (__a : Optional[Any] , __a : str , __a : Union[str, Any] ):
"""simple docstring"""
_a : Dict = weights[0][0][0]
_a : Tuple = np.asarray(layer_norm_a[0] )
_a : Dict = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# lsh weights + output
_a : List[str] = weights[0][1]
if len(__a ) < 4:
set_layer_weights_in_torch_lsh(__a , torch_block.attention , __a )
else:
set_layer_weights_in_torch_local(__a , torch_block.attention , __a )
# intermediate weights
_a : str = weights[2][0][1][2]
# Chunked Feed Forward
if len(__a ) == 4:
_a : Optional[Any] = intermediate_weights[2]
# layernorm 2
_a : str = np.asarray(intermediate_weights[0][0] )
_a : List[str] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# intermediate dense
_a : Tuple = np.asarray(intermediate_weights[1][0] )
_a : Dict = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
# intermediate out
_a : Union[str, Any] = np.asarray(intermediate_weights[4][0] )
_a : int = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
def UpperCAmelCase_ (__a : str , __a : List[Any] , __a : List[str] ):
"""simple docstring"""
_a : Union[str, Any] = torch_model.reformer
# word embeds
_a : List[str] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__a ) , )
if isinstance(weights[3] , __a ):
_a : str = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_a : Union[str, Any] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
_a : int = nn.Parameter(torch.tensor(__a ) )
_a : Optional[Any] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__a ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_a : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__a , __a , __a )
# output layer norm
_a : str = np.asarray(weights[7][0] )
_a : Any = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# output embeddings
_a : Dict = np.asarray(weights[9][0] )
_a : List[str] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
def UpperCAmelCase_ (__a : Any , __a : Union[str, Any] , __a : Optional[int] ):
"""simple docstring"""
_a : Tuple = ReformerConfig.from_json_file(__a )
print(f"""Building PyTorch model from configuration: {config}""" )
_a : int = ReformerModelWithLMHead(__a )
with open(__a , 'rb' ) as f:
_a : List[str] = pickle.load(__a )['weights']
set_model_weights_in_torch(__a , __a , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , __a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowerCAmelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
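# Added sketch: a typical invocation of this conversion script (the script
# name and all paths below are hypothetical placeholders):
#
#   python convert_reformer_checkpoint.py \
#       --trax_model_pkl_path ./reformer/model.pkl \
#       --config_file ./reformer/config.json \
#       --pytorch_dump_path ./pytorch_model.bin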
| 5 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-config' )
except HTTPError:
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('test-config' ,use_auth_token=self._token )
_a : Optional[Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a ,repo_id='test-config' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Tuple = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token )
_a : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='valid_org/test-config-org' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
_a : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'} )
_a : int = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" ,trust_remote_code=_a )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,'CustomConfig' )
self.assertEqual(new_config.attribute ,42 )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_a : int = c.n_embd + 1 # int
_a : str = c.resid_pdrop + 1.0 # float
_a : Dict = not c.scale_attn_weights # bool
_a : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(_a ,c.n_embd ,'mismatch for key: n_embd' )
self.assertEqual(_a ,c.resid_pdrop ,'mismatch for key: resid_pdrop' )
self.assertEqual(_a ,c.scale_attn_weights ,'mismatch for key: scale_attn_weights' )
self.assertEqual(_a ,c.summary_type ,'mismatch for key: summary_type' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : int = PretrainedConfig()
_a : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_a ,['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_a : Dict = [key for key, value in config_common_kwargs.items() if value == getattr(_a ,_a )]
if len(_a ) > 0:
raise ValueError(
'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs`, pick another value for them:'
F""" {', '.join(_a )}.""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaises(_a ):
# config is in subfolder, the following should not work without specifying the subfolder
_a : List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_a : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ,subfolder='bert' )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[Any] = mock.Mock()
_a : Any = 500
_a : Any = {}
_a : Any = HTTPError
_a : List[Any] = {}
# Download this model to make sure it's in the cache.
_a : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' ,return_value=_a ) as mock_head:
_a : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = AutoConfig.from_pretrained('bert-base-cased' )
_a : List[str] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_a )
_a : str = 2
json.dump(configuration.to_dict() ,open(os.path.join(_a ,'config.4.0.0.json' ) ,'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_a : Tuple = ['config.42.0.0.json']
_a : int = 768
configuration.save_pretrained(_a )
shutil.move(os.path.join(_a ,'config.4.0.0.json' ) ,os.path.join(_a ,'config.42.0.0.json' ) )
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,768 )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
_a : Optional[int] = 'v4.0.0'
_a, _a : Tuple = new_transformers.models.auto.AutoConfig.from_pretrained(
_a ,return_unused_kwargs=_a )
self.assertEqual(new_configuration.hidden_size ,2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_a ,{} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_a : str = 'v3.0.0'
_a : Optional[Any] = old_transformers.models.auto.AutoConfig.from_pretrained(_a )
self.assertEqual(old_configuration.hidden_size ,768 )
| 5 | 1 |
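The final tests above exercise versioned configuration files: from_pretrained should pick the newest config.<version>.json whose version does not exceed the running library version. A self-contained sketch of that selection rule; the helper name pick_config_file and the current-version string are assumptions for illustration, not the transformers implementation.

from packaging import version

def pick_config_file(available, current="4.31.0"):
    candidates = []
    for name in available:
        if name.startswith("config.") and name.endswith(".json") and name != "config.json":
            v = name[len("config.") : -len(".json")]
            candidates.append((version.parse(v), name))
    eligible = [(v, n) for v, n in candidates if v <= version.parse(current)]
    return max(eligible)[1] if eligible else "config.json"

print(pick_config_file(["config.json", "config.4.0.0.json", "config.42.0.0.json"]))
# -> config.4.0.0.json while the running version is below 42.0.0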
'''simple docstring'''
from __future__ import annotations
import math
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__lowerCAmelCase = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if not isinstance(__a , __a ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
_a : Optional[int] = []
for num in range(len(__a ) ):
_a : List[str] = 0
while 2 * i * i <= odd_composites[num]:
_a : Optional[Any] = odd_composites[num] - 2 * i * i
if is_prime(__a ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__a ) == n:
return list_nums
return []
def UpperCAmelCase_ ():
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__lowerCAmelCase = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
__lowerCAmelCase = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def UpperCAmelCase_ (__a : Dict ):
"""simple docstring"""
_a : List[Any] = list(state_dict.keys() )
for name in state_dict_keys:
_a : List[Any] = state_dict.pop(__a )
# emb -> embedding
if name.startswith('emb.' ):
_a : List[str] = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
_a : Dict = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
_a : int = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , __a )
# ffn -> feed_forward
_a : str = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , __a )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
_a : Any = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
_a : int = name.replace('.time_mix_v' , '.time_mix_value' )
    # time_mix_r -> time_mix_receptance and reshape
if name.endswith('.time_mix_r' ):
_a : Tuple = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
_a : Tuple = 'rwkv.' + name
_a : List[Any] = weight
return state_dict
def UpperCAmelCase_ (__a : Tuple , __a : Union[str, Any] , __a : List[str] , __a : str=None , __a : List[str]=None , __a : int=False , __a : int=None ):
"""simple docstring"""
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
_a : List[Any] = 5_0_2_7_7
_a : Optional[Any] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
_a : Optional[Any] = PreTrainedTokenizerFast(tokenizer_file=__a )
_a : List[Any] = len(__a )
tokenizer.save_pretrained(__a )
# 2. Build the config
_a : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_a : str = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
_a : str = RwkvConfig(
vocab_size=__a , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__a )
# 3. Download model file then convert state_dict
_a : Tuple = hf_hub_download(__a , __a )
_a : Optional[int] = torch.load(__a , map_location='cpu' )
_a : Dict = convert_state_dict(__a )
# 4. Split in shards and save
_a, _a : List[Any] = shard_checkpoint(__a )
for shard_file, shard in shards.items():
torch.save(__a , os.path.join(__a , __a ) )
if index is not None:
_a : Dict = os.path.join(__a , __a )
# Save the index as well
with open(__a , 'w' , encoding='utf-8' ) as f:
_a : List[Any] = json.dumps(__a , indent=2 , sort_keys=__a ) + '\n'
f.write(__a )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may raise an OOM error; if this is the case, don\'t worry, you have still converted the model.' )
_a : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_a : Optional[Any] = torch.load(os.path.join(__a , __a ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__a , __a ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
_a : List[str] = AutoModelForCausalLM.from_pretrained(__a )
model.push_to_hub(__a , max_shard_size='2GB' )
tokenizer.push_to_hub(__a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
__lowerCAmelCase = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 5 | 1 |
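convert_state_dict above is a pure key-renaming pass over a checkpoint dict. A standalone sketch of the same pattern with a trimmed-down rename table; the sample keys are illustrative, not the full RWKV mapping.

import re

def rename_keys(state_dict):
    out = {}
    for name, weight in state_dict.items():
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        out[name] = weight
    return out

print(rename_keys({"emb.weight": 0, "blocks.3.att.key.weight": 1}))
# {'embeddings.weight': 0, 'blocks.3.attention.key.weight': 1}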
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCAmelCase_ (__a : str , __a : Dict=0.999 , __a : List[str]="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__a : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__a : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_a : Tuple = []
for i in range(__a ):
_a : Union[str, Any] = i / num_diffusion_timesteps
_a : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__a ) / alpha_bar_fn(__a ) , __a ) )
return torch.tensor(__a , dtype=torch.floataa )
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Dict = 2
@register_to_config
def __init__( self : str ,_a : int = 1000 ,_a : float = 0.0_0085 ,_a : float = 0.012 ,_a : str = "linear" ,_a : Optional[Union[np.ndarray, List[float]]] = None ,_a : str = "epsilon" ,_a : Optional[bool] = False ,_a : Optional[bool] = False ,_a : float = 1.0 ,_a : str = "linspace" ,_a : int = 0 ,):
'''simple docstring'''
if trained_betas is not None:
_a : List[str] = torch.tensor(_a ,dtype=torch.floataa )
elif beta_schedule == "linear":
_a : Tuple = torch.linspace(_a ,_a ,_a ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a : List[str] = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_a ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a : Dict = betas_for_alpha_bar(_a ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
_a : Tuple = betas_for_alpha_bar(_a ,alpha_transform_type='exp' )
else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""" )
_a : Optional[Any] = 1.0 - self.betas
_a : Optional[int] = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(_a ,_a ,_a )
_a : Optional[int] = use_karras_sigmas
def __lowercase ( self : Any ,_a : Union[str, Any] ,_a : Optional[Any]=None ):
'''simple docstring'''
if schedule_timesteps is None:
_a : List[Any] = self.timesteps
_a : Dict = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a : int = 1 if len(_a ) > 1 else 0
else:
_a : str = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
_a : str = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Union[float, torch.FloatTensor] ,):
'''simple docstring'''
_a : List[Any] = self.index_for_timestep(_a )
_a : Tuple = self.sigmas[step_index]
_a : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowercase ( self : Any ,_a : int ,_a : Union[str, torch.device] = None ,_a : Optional[int] = None ,):
'''simple docstring'''
_a : Optional[Any] = num_inference_steps
_a : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a : Optional[Any] = np.linspace(0 ,num_train_timesteps - 1 ,_a ,dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : int = (np.arange(0 ,_a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a : Any = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : Union[str, Any] = (np.arange(_a ,0 ,-step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_a : Tuple = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a : Union[str, Any] = np.log(_a )
_a : str = np.interp(_a ,np.arange(0 ,len(_a ) ) ,_a )
if self.config.use_karras_sigmas:
_a : List[Any] = self._convert_to_karras(in_sigmas=_a ,num_inference_steps=self.num_inference_steps )
_a : Dict = np.array([self._sigma_to_t(_a ,_a ) for sigma in sigmas] )
_a : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a : Union[str, Any] = torch.from_numpy(_a ).to(device=_a )
_a : Any = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_a : List[Any] = torch.from_numpy(_a )
_a : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_a ).startswith('mps' ):
# mps does not support float64
_a : Tuple = timesteps.to(_a ,dtype=torch.floataa )
else:
_a : Dict = timesteps.to(device=_a )
# empty dt and derivative
_a : Tuple = None
_a : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a : Union[str, Any] = defaultdict(_a )
def __lowercase ( self : str ,_a : Dict ,_a : Dict ):
'''simple docstring'''
_a : Optional[int] = np.log(_a )
# get distribution
_a : Union[str, Any] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_a : List[Any] = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_a : Tuple = low_idx + 1
_a : Union[str, Any] = log_sigmas[low_idx]
_a : Optional[Any] = log_sigmas[high_idx]
# interpolate sigmas
_a : Optional[Any] = (low - log_sigma) / (low - high)
_a : List[str] = np.clip(_a ,0 ,1 )
# transform interpolation to time range
_a : Union[str, Any] = (1 - w) * low_idx + w * high_idx
_a : List[str] = t.reshape(sigma.shape )
return t
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Tuple ):
'''simple docstring'''
_a : float = in_sigmas[-1].item()
_a : float = in_sigmas[0].item()
_a : Tuple = 7.0 # 7.0 is the value used in the paper
_a : str = np.linspace(0 ,1 ,_a )
_a : Optional[Any] = sigma_min ** (1 / rho)
_a : Union[str, Any] = sigma_max ** (1 / rho)
_a : str = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return self.dt is None
def __lowercase ( self : int ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : Union[float, torch.FloatTensor] ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : bool = True ,):
'''simple docstring'''
_a : Union[str, Any] = self.index_for_timestep(_a )
# advance index counter by 1
_a : Any = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a : Tuple = self.sigmas[step_index]
_a : int = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_a : List[str] = self.sigmas[step_index - 1]
_a : List[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a : Optional[int] = 0
_a : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a : Dict = sigma_hat if self.state_in_first_order else sigma_next
_a : Optional[int] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a : List[Any] = sigma_hat if self.state_in_first_order else sigma_next
_a : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_a : Union[str, Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
_a : Optional[int] = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a : Optional[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a : Any = sigma_next - sigma_hat
# store for 2nd order step
_a : int = derivative
_a : List[str] = dt
_a : Union[str, Any] = sample
else:
# 2. 2nd order / Heun's method
_a : Dict = (sample - pred_original_sample) / sigma_next
_a : Tuple = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_a : Optional[Any] = self.dt
_a : Union[str, Any] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_a : List[Any] = None
_a : Union[str, Any] = None
_a : Dict = None
_a : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __lowercase ( self : Optional[int] ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,):
'''simple docstring'''
_a : str = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
_a : Dict = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_a : Optional[Any] = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_a : int = self.timesteps.to(original_samples.device )
_a : Optional[Any] = timesteps.to(original_samples.device )
_a : Any = [self.index_for_timestep(_a ,_a ) for t in timesteps]
_a : Optional[int] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a : Optional[Any] = sigma.unsqueeze(-1 )
_a : Any = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 5 |
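_convert_to_karras above builds the rho = 7 schedule from Karras et al. by interpolating between sigma_max and sigma_min in sigma**(1/rho) space. A standalone numpy sketch of that ramp with illustrative endpoint values.

import numpy as np

def karras_sigmas(sigma_min, sigma_max, n, rho=7.0):
    ramp = np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

print(karras_sigmas(0.1, 10.0, 5).round(4))  # decreasing noise levels from 10.0 down to 0.1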
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
__lowerCAmelCase = datasets.logging.get_logger(__name__)
__lowerCAmelCase = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'sources': datasets.Value('string' ,id='sequence' ),
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
] ,)
def __lowercase ( self : int ,_a : int ):
'''simple docstring'''
if self.config_name == "default":
_a : List[Any] = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
else:
_a : List[str] = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Dict ,_a : Optional[Any] ,_a : List[str]=None ,_a : Tuple=False ):
'''simple docstring'''
if gpus is None:
_a : str = 1 if torch.cuda.is_available() else 0
_a : Optional[Any] = {'src': sources, 'mt': predictions, 'ref': references}
_a : Optional[Any] = [dict(zip(_a ,_a ) ) for t in zip(*data.values() )]
_a, _a : Tuple = self.scorer.predict(_a ,gpus=_a ,progress_bar=_a )
return {"mean_score": mean_score, "scores": scores}
| 5 | 1 |
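The compute method above reshapes columnar inputs into per-example dicts via dict(zip(...)) over zip(*data.values()). The same reshaping in isolation, with illustrative strings:

data = {"src": ["source 1", "source 2"], "mt": ["hyp 1", "hyp 2"], "ref": ["ref 1", "ref 2"]}
rows = [dict(zip(data, t)) for t in zip(*data.values())]
print(rows[0])  # {'src': 'source 1', 'mt': 'hyp 1', 'ref': 'ref 1'}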
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __lt__( self : Optional[int] ,_a : Any ):
'''simple docstring'''
return self[-1] < other[-1]
def __eq__( self : List[str] ,_a : int ):
'''simple docstring'''
return self[-1] == other[-1]
def UpperCAmelCase_ (__a : list ):
"""simple docstring"""
_a : list[Stack] = []
# sort into stacks
for element in collection:
_a : str = Stack([element] )
_a : Optional[int] = bisect_left(__a , __a )
if i != len(__a ):
stacks[i].append(__a )
else:
stacks.append(__a )
# use a heap-based merge to merge stack efficiently
_a : Union[str, Any] = merge(*(reversed(__a ) for stack in stacks) )
return collection
if __name__ == "__main__":
__lowerCAmelCase = input("""Enter numbers separated by a comma:\n""").strip()
__lowerCAmelCase = [int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
| 5 |
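heapq.merge, which patience_sort uses for its final pass, lazily merges already-sorted iterables. A tiny demonstration:

from heapq import merge

print(list(merge([1, 4, 7], [2, 5], [3, 6])))  # [1, 2, 3, 4, 5, 6, 7]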
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 | 1 |
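_LazyModule above defers the heavy submodule imports until a symbol is first accessed. A minimal sketch of the underlying idea using a module-level __getattr__ (PEP 562); the real _LazyModule machinery is more involved, and the import table here is illustrative.

import importlib

_import_structure = {"json": ["dumps"], "math": ["sqrt"]}

def __getattr__(name):  # called only for attributes not found normally
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")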
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[int] ,_a : bool ,_a : Optional[int] = None ,_a : Optional[int] = None ):
'''simple docstring'''
super().__init__()
_a : List[Any] = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
_a : Optional[Any] = torch.zeros(_a ,_a )
else:
_a : Tuple = None
_a : Tuple = torch.nn.Parameter(_a )
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : VQModel
__UpperCAmelCase : CLIPTextModel
__UpperCAmelCase : CLIPTokenizer
__UpperCAmelCase : TransformeraDModel
__UpperCAmelCase : LearnedClassifierFreeSamplingEmbeddings
__UpperCAmelCase : VQDiffusionScheduler
def __init__( self : Tuple ,_a : VQModel ,_a : CLIPTextModel ,_a : CLIPTokenizer ,_a : TransformeraDModel ,_a : VQDiffusionScheduler ,_a : LearnedClassifierFreeSamplingEmbeddings ,):
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=_a ,transformer=_a ,text_encoder=_a ,tokenizer=_a ,scheduler=_a ,learned_classifier_free_sampling_embeddings=_a ,)
def __lowercase ( self : Union[str, Any] ,_a : Union[str, Any] ,_a : Optional[int] ,_a : Optional[int] ):
'''simple docstring'''
_a : Optional[int] = len(_a ) if isinstance(_a ,_a ) else 1
# get prompt text embeddings
_a : Optional[int] = self.tokenizer(
_a ,padding='max_length' ,max_length=self.tokenizer.model_max_length ,return_tensors='pt' ,)
_a : List[str] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_a : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
_a : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
_a : Optional[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
_a : List[Any] = prompt_embeds / prompt_embeds.norm(dim=-1 ,keepdim=_a )
# duplicate text embeddings for each generation per prompt
_a : int = prompt_embeds.repeat_interleave(_a ,dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
_a : List[Any] = self.learned_classifier_free_sampling_embeddings.embeddings
_a : Union[str, Any] = negative_prompt_embeds.unsqueeze(0 ).repeat(_a ,1 ,1 )
else:
_a : int = [''] * batch_size
_a : str = text_input_ids.shape[-1]
_a : Any = self.tokenizer(
_a ,padding='max_length' ,max_length=_a ,truncation=_a ,return_tensors='pt' ,)
_a : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
_a : int = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 ,keepdim=_a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_a : Union[str, Any] = negative_prompt_embeds.shape[1]
_a : List[Any] = negative_prompt_embeds.repeat(1 ,_a ,1 )
_a : Optional[int] = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,_a ,-1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_a : str = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : Tuple ,_a : Union[str, List[str]] ,_a : int = 100 ,_a : float = 5.0 ,_a : float = 1.0 ,_a : int = 1 ,_a : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[str] = "pil" ,_a : bool = True ,_a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,_a : int = 1 ,):
'''simple docstring'''
if isinstance(_a ,_a ):
_a : Dict = 1
elif isinstance(_a ,_a ):
_a : Optional[Any] = len(_a )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(_a )}""" )
_a : Tuple = batch_size * num_images_per_prompt
_a : Any = guidance_scale > 1.0
_a : Union[str, Any] = self._encode_prompt(_a ,_a ,_a )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_a ,_a ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(_a )}.""" )
# get the initial completely masked latents unless the user supplied it
_a : Optional[int] = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
_a : Dict = self.transformer.num_vector_embeds - 1
_a : Tuple = torch.full(_a ,_a ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
F""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
_a : Any = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_a ,device=self.device )
_a : Union[str, Any] = self.scheduler.timesteps.to(self.device )
_a : Tuple = latents
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the sample if we are doing classifier free guidance
_a : List[str] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
_a : Tuple = self.transformer(_a ,encoder_hidden_states=_a ,timestep=_a ).sample
if do_classifier_free_guidance:
_a, _a : List[Any] = model_output.chunk(2 )
_a : List[str] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(_a ,dim=1 ,keepdim=_a )
_a : Dict = self.truncate(_a ,_a )
# remove `log(0)`'s (`-inf`s)
_a : Dict = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
_a : Dict = self.scheduler.step(_a ,timestep=_a ,sample=_a ,generator=_a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_a ,_a ,_a )
_a : str = self.vqvae.config.vq_embed_dim
_a : Tuple = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
_a : int = self.vqvae.quantize.get_codebook_entry(_a ,shape=_a )
_a : Union[str, Any] = self.vqvae.decode(_a ,force_not_quantize=_a ).sample
_a : Dict = (image / 2 + 0.5).clamp(0 ,1 )
_a : Optional[Any] = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
_a : List[str] = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
def __lowercase ( self : Dict ,_a : torch.FloatTensor ,_a : float ):
'''simple docstring'''
_a, _a : int = torch.sort(_a ,1 ,descending=_a )
_a : str = torch.exp(_a )
_a : List[str] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
_a : Union[str, Any] = torch.full_like(keep_mask[:, 0:1, :] ,_a )
_a : Optional[int] = torch.cat((all_true, keep_mask) ,dim=1 )
_a : int = keep_mask[:, :-1, :]
_a : Any = keep_mask.gather(1 ,indices.argsort(1 ) )
_a : Any = log_p_x_0.clone()
_a : List[Any] = -torch.inf # -inf = log(0)
return rv
| 5 |
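The classifier-free guidance step above combines the unconditional and conditional halves of the batch as uncond + scale * (cond - uncond) (here in log-probability space). The arithmetic in isolation, with illustrative scalars:

uncond, cond, scale = 0.2, 0.8, 5.0
guided = uncond + scale * (cond - uncond)
print(guided)  # 3.2 -- extrapolates beyond the conditional prediction when scale > 1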
'''simple docstring'''
import sys
def UpperCAmelCase_ (__a : List[str] ):
"""simple docstring"""
_a : List[str] = len(__a )
_a : Dict = [[0 for x in range(__a )] for x in range(__a )]
_a : Union[str, Any] = [[0 for x in range(__a )] for x in range(__a )]
for chain_length in range(2 , __a ):
for a in range(1 , n - chain_length + 1 ):
_a : Tuple = a + chain_length - 1
_a : Any = sys.maxsize
for c in range(__a , __a ):
_a : Optional[Any] = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
_a : Dict = cost
_a : Any = c
return matrix, sol
def UpperCAmelCase_ (__a : Tuple , __a : List[str] , __a : Dict ):
"""simple docstring"""
if i == j:
print('A' + str(__a ) , end=' ' )
else:
print('(' , end=' ' )
print_optiomal_solution(__a , __a , optimal_solution[i][j] )
print_optiomal_solution(__a , optimal_solution[i][j] + 1 , __a )
print(')' , end=' ' )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
_a : Any = len(__a )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
_a, _a : Union[str, Any] = matrix_chain_order(__a )
print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
print_optiomal_solution(__a , 1 , n - 1 )
if __name__ == "__main__":
main()
| 5 | 1 |
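The bottom-up DP above fills matrix[a][b] = min over c of matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]. A memoized top-down sketch of the same recurrence, checked against the dimensions used in main:

from functools import lru_cache

def matrix_chain_cost(dims):
    @lru_cache(maxsize=None)
    def best(i, j):  # cheapest way to multiply matrices A_i .. A_j
        if i == j:
            return 0
        return min(
            best(i, c) + best(c + 1, j) + dims[i - 1] * dims[c] * dims[j]
            for c in range(i, j)
        )
    return best(1, len(dims) - 1)

print(matrix_chain_cost((30, 35, 15, 5, 10, 20, 25)))  # 15125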
'''simple docstring'''
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(__a , int(b / 2 ) ) * actual_power(__a , int(b / 2 ) )
else:
return a * actual_power(__a , int(b / 2 ) ) * actual_power(__a , int(b / 2 ) )
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
if b < 0:
return 1 / actual_power(__a , __a )
return actual_power(__a , __a )
if __name__ == "__main__":
print(power(-2, -3))
| 5 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase_ (__a : Optional[Any] ):
"""simple docstring"""
_a : int = FileLock(str(tmpdir / 'foo.lock' ) )
_a : List[Any] = FileLock(str(tmpdir / 'foo.lock' ) )
_a : Any = 0.01
with locka.acquire():
with pytest.raises(__a ):
_a : int = time.time()
locka.acquire(__a )
assert time.time() - _start > timeout
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Dict = 'a' * 1_0_0_0 + '.lock'
_a : int = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('.lock' )
assert not locka._lock_file.endswith(__a )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
_a : Dict = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__a ):
locka.acquire(0 )
| 5 | 1 |
'''simple docstring'''
from timeit import timeit
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if number < 0:
raise ValueError('the value of input must not be negative' )
_a : Tuple = 0
while number:
number &= number - 1
result += 1
return result
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if number < 0:
raise ValueError('the value of input must not be negative' )
_a : Optional[int] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def UpperCAmelCase_ ():
"""simple docstring"""
def do_benchmark(__a : int ) -> None:
_a : Dict = 'import __main__ as z'
print(f"""Benchmark when {number = }:""" )
print(f"""{get_set_bits_count_using_modulo_operator(__a ) = }""" )
_a : Tuple = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=__a )
print(f"""timeit() runs in {timing} seconds""" )
print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(__a ) = }""" )
_a : Dict = timeit(
'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=__a , )
print(f"""timeit() runs in {timing} seconds""" )
for number in (2_5, 3_7, 5_8, 0):
do_benchmark(__a )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 5 |
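For reference, the same population count is available through builtins and agrees with both functions above:

n = 25
print(bin(n).count("1"))  # 3
# On Python >= 3.10 the same value is available as n.bit_count()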
'''simple docstring'''
def UpperCAmelCase_ (__a : int = 1_0**1_2 ):
"""simple docstring"""
_a : List[str] = 1
_a : Optional[int] = 0
_a : Any = 1
_a : List[str] = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 | 1 |
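The recurrence above walks the integer solutions of b/n * (b - 1)/(n - 1) = 1/2 (Project Euler 100). A quick check of the first known arrangement, 15 blue discs out of 21:

from fractions import Fraction

blue, total = 15, 21
print(Fraction(blue, total) * Fraction(blue - 1, total - 1))  # 1/2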
'''simple docstring'''
import math
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
return math.sqrt(__a ) * math.sqrt(__a ) == num
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
_a : Dict = 0
_a : Tuple = n
while left <= right:
_a : List[Any] = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
_a : str = mid - 1
else:
_a : str = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 |
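math.isqrt gives an exact integer square root and avoids the float-rounding risk of math.sqrt for very large n; an equivalent check to the two functions above:

from math import isqrt

def is_square(n: int) -> bool:
    return n >= 0 and isqrt(n) ** 2 == n

print(is_square(16), is_square(26))  # True False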
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
__lowerCAmelCase = {"""mobilebert-uncased""": 5_1_2}
__lowerCAmelCase = {}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[Any] = MobileBertTokenizer
def __init__( self : Dict ,_a : List[Any]=None ,_a : Optional[Any]=None ,_a : Union[str, Any]=True ,_a : Dict="[UNK]" ,_a : Union[str, Any]="[SEP]" ,_a : Any="[PAD]" ,_a : Optional[int]="[CLS]" ,_a : Optional[Any]="[MASK]" ,_a : Dict=True ,_a : Any=None ,**_a : Optional[Any] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
_a : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_a ) != do_lower_case
or normalizer_state.get('strip_accents' ,_a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_a ) != tokenize_chinese_chars
):
_a : Optional[Any] = getattr(_a ,normalizer_state.pop('type' ) )
_a : Dict = do_lower_case
_a : str = strip_accents
_a : Tuple = tokenize_chinese_chars
_a : Optional[Any] = normalizer_class(**_a )
_a : str = do_lower_case
def __lowercase ( self : Tuple ,_a : Union[str, Any] ,_a : List[str]=None ):
'''simple docstring'''
_a : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : List[str] = [self.sep_token_id]
_a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
_a : int = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
| 5 | 1 |
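build_inputs_with_special_tokens above produces [CLS] A [SEP] and appends B [SEP] for sentence pairs. The same concatenation with hypothetical BERT-style ids (101 and 102 are illustrative, not read from a real vocab):

cls_id, sep_id = [101], [102]
ids_a, ids_b = [7, 8], [9]
print(cls_id + ids_a + sep_id + ids_b + sep_id)  # [101, 7, 8, 102, 9, 102]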
'''simple docstring'''
from math import isqrt
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
_a : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , __a , __a ):
_a : str = False
return [i for i in range(2 , __a ) if is_prime[i]]
def UpperCAmelCase_ (__a : int = 1_0**8 ):
"""simple docstring"""
_a : Optional[Any] = calculate_prime_numbers(max_number // 2 )
_a : Union[str, Any] = 0
_a : Any = 0
_a : Optional[int] = len(__a ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 |
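A brute-force cross-check of the two-pointer semiprime count above, for a small hand-checkable limit:

def is_prime(n):
    return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

semiprimes = [
    n for n in range(4, 30)
    if any(n % p == 0 and is_prime(p) and is_prime(n // p)
           for p in range(2, int(n**0.5) + 1))
]
print(len(semiprimes))  # 10 composites below 30 with exactly two prime factors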
'''simple docstring'''
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : List[Any] = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
_a : Optional[int] = ''
_a : List[str] = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__a ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previously found furthest-ending
    # palindromic substring
_a, _a : Optional[int] = 0, 0
# length[i] shows the length of palindromic substring with center i
_a : Optional[Any] = [1 for i in range(len(__a ) )]
# for each character in new_string find corresponding palindromic string
_a : Dict = 0
for j in range(len(__a ) ):
_a : Dict = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__a )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_a : Optional[int] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update r to the last index of this palindrome
if j + k - 1 > r:
_a : str = j - k + 1 # noqa: E741
_a : Any = j + k - 1
# update max_length and start position
if max_length < length[j]:
_a : Union[str, Any] = length[j]
_a : List[str] = j
# create that string
_a : Tuple = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 1 |
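A quadratic brute-force oracle is a convenient cross-check for the Manacher implementation above on short inputs:

def longest_palindrome_brute(s):
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            sub = s[i : j + 1]
            if sub == sub[::-1] and len(sub) > len(best):
                best = sub
    return best

print(longest_palindrome_brute("abacabad"))  # abacaba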
'''simple docstring'''
def UpperCAmelCase_ (__a : str , __a : int ):
"""simple docstring"""
_a : list[list[str]] = [[] for _ in range(__a )]
_a : Optional[int] = key - 1
if key <= 0:
raise ValueError('Height of grid can\'t be 0 or negative' )
if key == 1 or len(__a ) <= key:
return input_string
for position, character in enumerate(__a ):
_a : int = position % (lowest * 2) # puts it in bounds
_a : Tuple = min(__a , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(__a )
_a : List[Any] = [''.join(__a ) for row in temp_grid]
_a : Optional[int] = ''.join(__a )
return output_string
def UpperCAmelCase_ (__a : str , __a : int ):
"""simple docstring"""
_a : Optional[Any] = []
_a : List[str] = key - 1
if key <= 0:
raise ValueError('Height of grid can\'t be 0 or negative' )
if key == 1:
return input_string
_a : list[list[str]] = [[] for _ in range(__a )] # generates template
for position in range(len(__a ) ):
_a : Optional[int] = position % (lowest * 2) # puts it in bounds
_a : List[str] = min(__a , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append('*' )
_a : Any = 0
for row in temp_grid: # fills in the characters
_a : str = input_string[counter : counter + len(__a )]
grid.append(list(__a ) )
counter += len(__a )
_a : int = '' # reads as zigzag
for position in range(len(__a ) ):
_a : str = position % (lowest * 2) # puts it in bounds
_a : Union[str, Any] = min(__a , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Optional[int] = {}
for key_guess in range(1 , len(__a ) ): # tries every key
_a : Optional[Any] = decrypt(__a , __a )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 |
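Both encrypt and decrypt above derive the rail index from num = position % (2 * (key - 1)), folded back into bounds with min(num, 2 * (key - 1) - num). The resulting zigzag pattern for key = 3:

key = 3
period = 2 * (key - 1)
rows = [min(p % period, period - p % period) for p in range(8)]
print(rows)  # [0, 1, 2, 1, 0, 1, 2, 1]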
'''simple docstring'''
from functools import lru_cache
@lru_cache
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 1 |
'''simple docstring'''
import argparse
import datetime
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : List[str] = {
'0': 'Sunday',
'1': 'Monday',
'2': 'Tuesday',
'3': 'Wednesday',
'4': 'Thursday',
'5': 'Friday',
'6': 'Saturday',
}
_a : Optional[int] = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(__a ) < 1_1:
raise ValueError('Must be 10 characters long' )
# Get month
_a : int = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 1_3:
raise ValueError('Month must be between 1 - 12' )
_a : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get day
_a : int = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 3_2:
raise ValueError('Date must be between 1 - 31' )
# Get second separator
_a : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get year
_a : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 4_5 < y < 8_5_0_0:
raise ValueError(
'Year out of range. There has to be some sort of limit...right?' )
# Get datetime obj for validation
_a : Dict = datetime.date(int(__a ) , int(__a ) , int(__a ) )
# Start math
if m <= 2:
_a : Optional[Any] = y - 1
_a : Any = m + 1_2
# maths var
_a : int = int(str(__a )[:2] )
_a : int = int(str(__a )[2:] )
_a : int = int(2.6 * m - 5.39 )
_a : int = int(c / 4 )
_a : int = int(k / 4 )
_a : int = int(d + k )
_a : int = int(t + u + v + x )
_a : int = int(z - (2 * c) )
_a : int = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
# Response
_a : str = f"""Your date {date_input}, is a {days[str(__a )]}!"""
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
__lowerCAmelCase = parser.parse_args()
zeller(args.date_input)
| 5 |
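The hand-rolled Zeller computation above already validates itself against datetime internally; the same sanity check in isolation:

import datetime

print(datetime.date(2024, 1, 15).strftime("%A"))  # Monday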
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__lowerCAmelCase = threading.Lock()
__lowerCAmelCase = None
__lowerCAmelCase = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
__lowerCAmelCase = logging.WARNING
__lowerCAmelCase = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Dict = os.getenv('TRANSFORMERS_VERBOSITY' , __a )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def UpperCAmelCase_ ():
"""simple docstring"""
return __name__.split('.' )[0]
def UpperCAmelCase_ ():
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_a : str = logging.StreamHandler() # Set sys.stderr as stream.
_a : Optional[Any] = sys.stderr.flush
# Apply our default configuration to the library root logger.
_a : List[Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_a : List[str] = False
def UpperCAmelCase_ ():
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
_a : int = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_a : str = None
def UpperCAmelCase_ ():
"""simple docstring"""
return log_levels
def UpperCAmelCase_ (__a : Optional[str] = None ):
"""simple docstring"""
if name is None:
_a : List[Any] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
return set_verbosity(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__a )
def UpperCAmelCase_ (__a : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Union[str, Any] = False
def UpperCAmelCase_ ():
"""simple docstring"""
_configure_library_root_logger()
_a : Dict = True
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = _get_library_root_logger().handlers
for handler in handlers:
_a : Union[str, Any] = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(__a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__a )
def UpperCAmelCase_ (self : Union[str, Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ):
"""simple docstring"""
_a : Union[str, Any] = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , __a )
if no_advisory_warnings:
return
self.warning(*__a , **__a )
__lowerCAmelCase = warning_advice
@functools.lru_cache(__a )
def UpperCAmelCase_ (self : int , *__a : Optional[Any] , **__a : Any ):
"""simple docstring"""
self.warning(*__a , **__a )
__lowerCAmelCase = warning_once
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Any ,*_a : Tuple ,**_a : int ): # pylint: disable=unused-argument
'''simple docstring'''
_a : int = args[0] if args else None
def __iter__( self : str ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[Any] ,_a : int ):
'''simple docstring'''
def empty_fn(*_a : Optional[Any] ,**_a : Any ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : List[str] ):
'''simple docstring'''
return self
def __exit__( self : List[str] ,_a : str ,_a : List[Any] ,_a : str ):
'''simple docstring'''
return
class UpperCAmelCase__ :
"""simple docstring"""
def __call__( self : Union[str, Any] ,*_a : Tuple ,**_a : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*_a ,**_a )
else:
return EmptyTqdm(*_a ,**_a )
def __lowercase ( self : str ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
_a : Any = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_a ,**_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__lowerCAmelCase = _tqdm_cls()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : str = True
hf_hub_utils.enable_progress_bars()
def UpperCAmelCase_ ():
"""simple docstring"""
global _tqdm_active
_a : Dict = False
hf_hub_utils.disable_progress_bars()
| 5 | 1 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def UpperCAmelCase_ (__a : bool = True , *__a : List[Any] , **__a : List[str] ):
"""simple docstring"""
if not is_tqdm_available():
raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
_a : Optional[int] = False
if main_process_only:
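        # Show the bar only on the local main process; disable it on all other ranks.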
        _a : Union[str, Any] = PartialState().local_process_index != 0
return _tqdm(*__a , **__a , disable=__a )
| 5 |
'''simple docstring'''
def UpperCAmelCase_ (__a : list[int] , __a : list[int] ):
"""simple docstring"""
if not len(__a ) == len(__a ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_a, _a, _a : Tuple = equationa
_a, _a, _a : str = equationa
# Calculate the determinants of the matrices
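    # det = a1*b2 - a2*b1, det_x = c1*b2 - c2*b1, det_y = a1*c2 - a2*c1 (Cramer's rule)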
_a : Union[str, Any] = aa * ba - aa * ba
_a : List[Any] = ca * ba - ca * ba
_a : List[Any] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
            # Trivial solution: the unique solution is x = y = 0 (consistent system)
return (0.0, 0.0)
else:
_a : int = determinant_x / determinant
_a : List[str] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 5 | 1 |
'''simple docstring'''
from __future__ import annotations
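# Precompute primality up to 1_000_000 with a sieve of Eratosthenes.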
__lowerCAmelCase = [True] * 1_0_0_0_0_0_1
__lowerCAmelCase = 2
while i * i <= 1_0_0_0_0_0_0:
if seive[i]:
for j in range(i * i, 1_0_0_0_0_0_1, i):
__lowerCAmelCase = False
i += 1
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
return seive[n]
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
return any(digit in '02468' for digit in str(__a ) )
def UpperCAmelCase_ (__a : int = 1_0_0_0_0_0_0 ):
"""simple docstring"""
_a : Dict = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(__a ) and not contains_an_even_digit(__a ):
_a : Union[str, Any] = str(__a )
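            # Build every rotation of the digits; a circular prime requires all rotations to be prime.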
_a : str = [int(str_num[j:] + str_num[:j] ) for j in range(len(__a ) )]
if all(is_prime(__a ) for i in list_nums ):
result.append(__a )
return result
def UpperCAmelCase_ ():
"""simple docstring"""
return len(find_circular_primes() )
if __name__ == "__main__":
print(f'''{len(find_circular_primes()) = }''')
| 5 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ,_a : List[str] ,_a : Optional[Any]=13 ,_a : str=30 ,_a : str=2 ,_a : Union[str, Any]=3 ,_a : Optional[Any]=True ,_a : int=True ,_a : Union[str, Any]=32 ,_a : List[Any]=5 ,_a : Union[str, Any]=4 ,_a : int=37 ,_a : Any="gelu" ,_a : Union[str, Any]=0.1 ,_a : str=0.1 ,_a : List[str]=10 ,_a : Dict=0.02 ,_a : Tuple=None ,):
'''simple docstring'''
_a : Any = parent
_a : int = batch_size
_a : List[Any] = image_size
_a : Optional[int] = patch_size
_a : List[str] = num_channels
_a : Dict = is_training
_a : Dict = use_labels
_a : Optional[Any] = hidden_size
_a : str = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Dict = intermediate_size
_a : Union[str, Any] = hidden_act
_a : List[str] = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : List[str] = type_sequence_label_size
_a : int = initializer_range
_a : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a : Union[str, Any] = (image_size // patch_size) ** 2
_a : Tuple = num_patches + 1
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : str = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def __lowercase ( self : Tuple ,_a : Any ,_a : List[Any] ,_a : int ):
'''simple docstring'''
_a : str = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_a : int = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : List[Any] ,_a : str ,_a : Tuple ,_a : Dict ):
'''simple docstring'''
_a : Tuple = self.type_sequence_label_size
_a : int = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = model(_a ,labels=_a )
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
        print(f'Labels: {labels}' )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a : int = 1
_a : Optional[Any] = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = self.prepare_config_and_inputs()
_a, _a, _a : int = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[str] = ViTMSNModelTester(self )
_a : Optional[int] = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_a )
_a : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : List[Any] = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self : int ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(2 )
_a : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_a )
_a : List[str] = self.default_image_processor
_a : int = prepare_img()
_a : Tuple = image_processor(images=_a ,return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[int] = model(**_a )
# verify the logits
_a : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_a )
_a : List[Any] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1E-4 ) )
| 5 | 1 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ['''image_processor''']
__UpperCAmelCase : List[Any] = '''SamImageProcessor'''
def __init__( self : Dict ,_a : List[str] ):
'''simple docstring'''
super().__init__(_a )
_a : Union[str, Any] = self.image_processor
_a : Optional[int] = -10
_a : Optional[Any] = self.image_processor.size['longest_edge']
def __call__( self : Union[str, Any] ,_a : str=None ,_a : List[Any]=None ,_a : Tuple=None ,_a : Any=None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,):
'''simple docstring'''
_a : Dict = self.image_processor(
_a ,return_tensors=_a ,**_a ,)
        # pop arguments that are not used in the forward pass but are used nevertheless
_a : int = encoding_image_processor['original_sizes']
if hasattr(_a ,'numpy' ): # Checks if Torch or TF tensor
_a : List[Any] = original_sizes.numpy()
_a, _a, _a : Union[str, Any] = self._check_and_preprocess_points(
input_points=_a ,input_labels=_a ,input_boxes=_a ,)
_a : List[str] = self._normalize_and_convert(
_a ,_a ,input_points=_a ,input_labels=_a ,input_boxes=_a ,return_tensors=_a ,)
return encoding_image_processor
def __lowercase ( self : List[Any] ,_a : Dict ,_a : List[str] ,_a : List[Any]=None ,_a : int=None ,_a : Any=None ,_a : int="pt" ,):
'''simple docstring'''
if input_points is not None:
if len(_a ) != len(_a ):
_a : Optional[Any] = [
self._normalize_coordinates(self.target_size ,_a ,original_sizes[0] ) for point in input_points
]
else:
_a : Tuple = [
self._normalize_coordinates(self.target_size ,_a ,_a )
for point, original_size in zip(_a ,_a )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_a, _a : List[str] = self._pad_points_and_labels(_a ,_a )
_a : str = np.array(_a )
if input_labels is not None:
_a : Optional[int] = np.array(_a )
if input_boxes is not None:
if len(_a ) != len(_a ):
_a : Optional[int] = [
self._normalize_coordinates(self.target_size ,_a ,original_sizes[0] ,is_bounding_box=_a )
for box in input_boxes
]
else:
_a : Optional[Any] = [
self._normalize_coordinates(self.target_size ,_a ,_a ,is_bounding_box=_a )
for box, original_size in zip(_a ,_a )
]
_a : Any = np.array(_a )
if input_boxes is not None:
if return_tensors == "pt":
_a : Any = torch.from_numpy(_a )
# boxes batch size of 1 by default
_a : Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_a : Dict = tf.convert_to_tensor(_a )
# boxes batch size of 1 by default
_a : Tuple = tf.expand_dims(_a ,1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_a : Dict = torch.from_numpy(_a )
# point batch size of 1 by default
_a : Optional[Any] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_a : List[Any] = tf.convert_to_tensor(_a )
# point batch size of 1 by default
_a : Tuple = tf.expand_dims(_a ,1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
_a : str = torch.from_numpy(_a )
# point batch size of 1 by default
_a : Any = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_a : Optional[Any] = tf.convert_to_tensor(_a )
# point batch size of 1 by default
_a : Dict = tf.expand_dims(_a ,1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = max([point.shape[0] for point in input_points] )
_a : List[str] = []
for i, point in enumerate(_a ):
if point.shape[0] != expected_nb_points:
_a : List[str] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] ,axis=0 )
_a : Optional[int] = np.append(input_labels[i] ,[self.point_pad_value] )
processed_input_points.append(_a )
_a : str = processed_input_points
return input_points, input_labels
def __lowercase ( self : List[str] ,_a : int ,_a : np.ndarray ,_a : List[str] ,_a : Union[str, Any]=False ):
'''simple docstring'''
_a, _a : Any = original_size
_a, _a : int = self.image_processor._get_preprocess_shape(_a ,longest_edge=_a )
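        # Rescale point/box coordinates from the original image size to the resized model input size.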
_a : Dict = deepcopy(_a ).astype(_a )
if is_bounding_box:
_a : str = coords.reshape(-1 ,2 ,2 )
_a : List[Any] = coords[..., 0] * (new_w / old_w)
_a : str = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_a : List[Any] = coords.reshape(-1 ,4 )
return coords
def __lowercase ( self : str ,_a : str=None ,_a : int=None ,_a : Dict=None ,):
'''simple docstring'''
if input_points is not None:
if hasattr(_a ,'numpy' ): # Checks for TF or Torch tensor
_a : str = input_points.numpy().tolist()
if not isinstance(_a ,_a ) or not isinstance(input_points[0] ,_a ):
raise ValueError('Input points must be a list of list of floating points.' )
_a : int = [np.array(_a ) for input_point in input_points]
else:
_a : List[Any] = None
if input_labels is not None:
if hasattr(_a ,'numpy' ):
_a : Dict = input_labels.numpy().tolist()
if not isinstance(_a ,_a ) or not isinstance(input_labels[0] ,_a ):
raise ValueError('Input labels must be a list of list integers.' )
_a : Tuple = [np.array(_a ) for label in input_labels]
else:
_a : Union[str, Any] = None
if input_boxes is not None:
if hasattr(_a ,'numpy' ):
_a : Optional[Any] = input_boxes.numpy().tolist()
if (
not isinstance(_a ,_a )
or not isinstance(input_boxes[0] ,_a )
or not isinstance(input_boxes[0][0] ,_a )
):
raise ValueError('Input boxes must be a list of list of list of floating points.' )
_a : List[Any] = [np.array(_a ).astype(np.floataa ) for box in input_boxes]
else:
_a : int = None
return input_points, input_labels, input_boxes
@property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(_a ) )
def __lowercase ( self : Dict ,*_a : int ,**_a : Any ):
'''simple docstring'''
return self.image_processor.post_process_masks(*_a ,**_a )
| 5 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ (__a : str = "https://www.worldometers.info/coronavirus" ):
"""simple docstring"""
_a : List[str] = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
_a : Dict = soup.findAll('h1' )
_a : Union[str, Any] = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(__a , __a )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 1 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCAmelCase__ ( datasets.BuilderConfig ):
"""simple docstring"""
__UpperCAmelCase : Optional[datasets.Features] = None
__UpperCAmelCase : str = "utf-8"
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : bool = True # deprecated
__UpperCAmelCase : Optional[int] = None # deprecated
__UpperCAmelCase : int = 10 << 20 # 10MB
__UpperCAmelCase : Optional[bool] = None
class UpperCAmelCase__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
__UpperCAmelCase : Any = JsonConfig
def __lowercase ( self : Dict ):
'''simple docstring'''
if self.config.block_size is not None:
logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' )
_a : Union[str, Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' )
if self.config.newlines_in_values is not None:
raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' )
return datasets.DatasetInfo(features=self.config.features )
def __lowercase ( self : Union[str, Any] ,_a : Dict ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_a : Dict = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_a ,(str, list, tuple) ):
_a : Any = data_files
if isinstance(_a ,_a ):
_a : Any = [files]
_a : int = [dl_manager.iter_files(_a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )]
_a : str = []
for split_name, files in data_files.items():
if isinstance(_a ,_a ):
_a : int = [files]
_a : List[Any] = [dl_manager.iter_files(_a ) for file in files]
splits.append(datasets.SplitGenerator(name=_a ,gen_kwargs={'files': files} ) )
return splits
def __lowercase ( self : str ,_a : pa.Table ):
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
_a : Any = self.config.features.arrow_schema.field(_a ).type
_a : str = pa_table.append_column(_a ,pa.array([None] * len(_a ) ,type=_a ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
_a : Tuple = table_cast(_a ,self.config.features.arrow_schema )
return pa_table
def __lowercase ( self : Union[str, Any] ,_a : Dict ):
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(_a ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_a ,encoding=self.config.encoding ,errors=self.config.encoding_errors ) as f:
_a : Optional[Any] = json.load(_a )
# We keep only the field we are interested in
_a : Any = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
if isinstance(_a ,(list, tuple) ):
_a : Any = set().union(*[row.keys() for row in dataset] )
_a : List[Any] = {col: [row.get(_a ) for row in dataset] for col in keys}
else:
_a : Optional[int] = dataset
_a : str = pa.Table.from_pydict(_a )
yield file_idx, self._cast_table(_a )
# If the file has one json object per line
else:
with open(_a ,'rb' ) as f:
_a : Union[str, Any] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
_a : Any = max(self.config.chunksize // 32 ,16 << 10 )
_a : Union[str, Any] = (
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
)
while True:
_a : Optional[Any] = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_a )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
_a : str = batch.decode(self.config.encoding ,errors=_a ).encode('utf-8' )
try:
while True:
try:
_a : Optional[Any] = paj.read_json(
io.BytesIO(_a ) ,read_options=paj.ReadOptions(block_size=_a ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_a ,pa.ArrowInvalid )
and "straddling" not in str(_a )
or block_size > len(_a )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(_a )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_a ,encoding=self.config.encoding ,errors=self.config.encoding_errors ) as f:
_a : Tuple = json.load(_a )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(_a )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_a ,_a ): # list is the only sequence type supported in JSON
try:
_a : Tuple = set().union(*[row.keys() for row in dataset] )
_a : Tuple = {col: [row.get(_a ) for row in dataset] for col in keys}
_a : List[Any] = pa.Table.from_pydict(_a )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(_a )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(_a )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(_a )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_a )
batch_idx += 1
| 5 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase = """docs/source/en/_toctree.yml"""
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Any = defaultdict(__a )
for doc in model_doc:
counts[doc["local"]] += 1
_a : List[str] = [key for key, value in counts.items() if value > 1]
_a : str = []
for duplicate_key in duplicates:
_a : Union[str, Any] = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add the non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(__a , key=lambda __a : s["title"].lower() )
def UpperCAmelCase_ (__a : Optional[int]=False ):
"""simple docstring"""
with open(__a , encoding='utf-8' ) as f:
_a : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
_a : Union[str, Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_a : Union[str, Any] = content[api_idx]['sections']
# Then to the model doc
_a : List[str] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
_a : List[str] = api_doc[model_idx]['sections']
_a : List[Any] = [(idx, section) for idx, section in enumerate(__a ) if 'sections' in section]
_a : Tuple = False
for idx, modality_doc in modalities_docs:
_a : List[Any] = modality_doc['sections']
_a : Any = clean_model_doc_toc(__a )
if old_modality_doc != new_modality_doc:
_a : Union[str, Any] = True
if overwrite:
_a : str = new_modality_doc
if diff:
if overwrite:
_a : Dict = model_doc
_a : Dict = api_doc
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__lowerCAmelCase = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 5 | 1 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase_ (__a : Optional[Any] ):
"""simple docstring"""
_a : int = FileLock(str(tmpdir / 'foo.lock' ) )
_a : List[Any] = FileLock(str(tmpdir / 'foo.lock' ) )
_a : Any = 0.01
with locka.acquire():
with pytest.raises(__a ):
_a : int = time.time()
locka.acquire(__a )
assert time.time() - _start > timeout
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Dict = 'a' * 1_0_0_0 + '.lock'
_a : int = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('.lock' )
assert not locka._lock_file.endswith(__a )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
_a : Dict = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__a ):
locka.acquire(0 )
| 5 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) != 3_2:
raise ValueError('Input must be of length 32' )
_a : Any = b''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '08x' )[-8:]
_a : str = b''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : List[Any] = b''
for char in message:
bit_string += format(__a , '08b' ).encode('utf-8' )
_a : int = format(len(__a ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__a ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) % 5_1_2 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(__a ) , 5_1_2 ):
_a : List[Any] = bit_string[pos : pos + 5_1_2]
_a : str = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '032b' )
_a : int = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__a , 2 )
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
return (a + b) % 2**3_2
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : str = preprocess(__a )
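    # Additive constants K[i] = floor(2**32 * |sin(i + 1)|), as specified in RFC 1321.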
_a : Optional[int] = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
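    # Little-endian initialization vector (A, B, C, D) from RFC 1321.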
_a : int = 0x67_45_23_01
_a : Union[str, Any] = 0xEF_CD_AB_89
_a : str = 0x98_BA_DC_FE
_a : List[Any] = 0x10_32_54_76
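    # Per-round left-rotation amounts: four groups of sixteen, one group per MD5 round.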
_a : Optional[int] = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__a ):
_a : Union[str, Any] = aa
_a : List[Any] = ba
_a : List[Any] = ca
_a : Dict = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a : Optional[int] = d ^ (b & (c ^ d))
_a : Optional[Any] = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a : Optional[Any] = c ^ (d & (b ^ c))
_a : Dict = (5 * i + 1) % 1_6
elif i <= 4_7:
_a : Optional[Any] = b ^ c ^ d
_a : Dict = (3 * i + 5) % 1_6
else:
_a : int = c ^ (b | not_aa(__a ))
_a : List[str] = (7 * i) % 1_6
_a : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**3_2
_a : Union[str, Any] = d
_a : Tuple = c
_a : Optional[int] = b
_a : Union[str, Any] = sum_aa(__a , left_rotate_aa(__a , shift_amounts[i] ) )
# Add hashed chunk to running total
_a : Any = sum_aa(__a , __a )
_a : Dict = sum_aa(__a , __a )
_a : Union[str, Any] = sum_aa(__a , __a )
_a : str = sum_aa(__a , __a )
_a : Optional[Any] = reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 1 |
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ (__a : list[int] , __a : int , __a : int , __a : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_a, _a : Tuple = array[indexa], array[indexa]
def UpperCAmelCase_ (__a : list[int] , __a : int , __a : int , __a : int ):
"""simple docstring"""
if length > 1:
_a : str = int(length / 2 )
for i in range(__a , low + middle ):
comp_and_swap(__a , __a , i + middle , __a )
bitonic_merge(__a , __a , __a , __a )
bitonic_merge(__a , low + middle , __a , __a )
def UpperCAmelCase_ (__a : list[int] , __a : int , __a : int , __a : int ):
"""simple docstring"""
if length > 1:
_a : str = int(length / 2 )
bitonic_sort(__a , __a , __a , 1 )
bitonic_sort(__a , low + middle , __a , 0 )
bitonic_merge(__a , __a , __a , __a )
if __name__ == "__main__":
__lowerCAmelCase = input("""Enter numbers separated by a comma:\n""").strip()
__lowerCAmelCase = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
| 5 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCAmelCase_ (__a : str , __a : Dict=0.999 , __a : List[str]="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__a : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__a : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_a : Tuple = []
for i in range(__a ):
_a : Union[str, Any] = i / num_diffusion_timesteps
_a : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__a ) / alpha_bar_fn(__a ) , __a ) )
return torch.tensor(__a , dtype=torch.floataa )
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Dict = 2
@register_to_config
def __init__( self : str ,_a : int = 1000 ,_a : float = 0.0_0085 ,_a : float = 0.012 ,_a : str = "linear" ,_a : Optional[Union[np.ndarray, List[float]]] = None ,_a : str = "epsilon" ,_a : Optional[bool] = False ,_a : Optional[bool] = False ,_a : float = 1.0 ,_a : str = "linspace" ,_a : int = 0 ,):
'''simple docstring'''
if trained_betas is not None:
_a : List[str] = torch.tensor(_a ,dtype=torch.floataa )
elif beta_schedule == "linear":
_a : Tuple = torch.linspace(_a ,_a ,_a ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a : List[str] = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_a ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a : Dict = betas_for_alpha_bar(_a ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
_a : Tuple = betas_for_alpha_bar(_a ,alpha_transform_type='exp' )
else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""" )
_a : Optional[Any] = 1.0 - self.betas
_a : Optional[int] = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(_a ,_a ,_a )
_a : Optional[int] = use_karras_sigmas
def __lowercase ( self : Any ,_a : Union[str, Any] ,_a : Optional[Any]=None ):
'''simple docstring'''
if schedule_timesteps is None:
_a : List[Any] = self.timesteps
_a : Dict = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a : int = 1 if len(_a ) > 1 else 0
else:
_a : str = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
_a : str = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Union[float, torch.FloatTensor] ,):
'''simple docstring'''
_a : List[Any] = self.index_for_timestep(_a )
_a : Tuple = self.sigmas[step_index]
_a : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowercase ( self : Any ,_a : int ,_a : Union[str, torch.device] = None ,_a : Optional[int] = None ,):
'''simple docstring'''
_a : Optional[Any] = num_inference_steps
_a : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a : Optional[Any] = np.linspace(0 ,num_train_timesteps - 1 ,_a ,dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : int = (np.arange(0 ,_a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a : Any = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : Union[str, Any] = (np.arange(_a ,0 ,-step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_a : Tuple = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a : Union[str, Any] = np.log(_a )
_a : str = np.interp(_a ,np.arange(0 ,len(_a ) ) ,_a )
if self.config.use_karras_sigmas:
_a : List[Any] = self._convert_to_karras(in_sigmas=_a ,num_inference_steps=self.num_inference_steps )
_a : Dict = np.array([self._sigma_to_t(_a ,_a ) for sigma in sigmas] )
_a : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a : Union[str, Any] = torch.from_numpy(_a ).to(device=_a )
_a : Any = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_a : List[Any] = torch.from_numpy(_a )
_a : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_a ).startswith('mps' ):
# mps does not support float64
_a : Tuple = timesteps.to(_a ,dtype=torch.floataa )
else:
_a : Dict = timesteps.to(device=_a )
# empty dt and derivative
_a : Tuple = None
_a : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a : Union[str, Any] = defaultdict(_a )
def __lowercase ( self : str ,_a : Dict ,_a : Dict ):
'''simple docstring'''
_a : Optional[int] = np.log(_a )
# get distribution
_a : Union[str, Any] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_a : List[Any] = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_a : Tuple = low_idx + 1
_a : Union[str, Any] = log_sigmas[low_idx]
_a : Optional[Any] = log_sigmas[high_idx]
# interpolate sigmas
_a : Optional[Any] = (low - log_sigma) / (low - high)
_a : List[str] = np.clip(_a ,0 ,1 )
# transform interpolation to time range
_a : Union[str, Any] = (1 - w) * low_idx + w * high_idx
_a : List[str] = t.reshape(sigma.shape )
return t
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Tuple ):
'''simple docstring'''
_a : float = in_sigmas[-1].item()
_a : float = in_sigmas[0].item()
_a : Tuple = 7.0 # 7.0 is the value used in the paper
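        # Interpolate in sigma**(1/rho) space between sigma_max and sigma_min (Karras et al., 2022).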
_a : str = np.linspace(0 ,1 ,_a )
_a : Optional[Any] = sigma_min ** (1 / rho)
_a : Union[str, Any] = sigma_max ** (1 / rho)
_a : str = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return self.dt is None
def __lowercase ( self : int ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : Union[float, torch.FloatTensor] ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : bool = True ,):
'''simple docstring'''
_a : Union[str, Any] = self.index_for_timestep(_a )
# advance index counter by 1
_a : Any = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a : Tuple = self.sigmas[step_index]
_a : int = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_a : List[str] = self.sigmas[step_index - 1]
_a : List[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a : Optional[int] = 0
_a : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a : Dict = sigma_hat if self.state_in_first_order else sigma_next
_a : Optional[int] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a : List[Any] = sigma_hat if self.state_in_first_order else sigma_next
_a : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_a : Union[str, Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
_a : Optional[int] = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a : Optional[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a : Any = sigma_next - sigma_hat
# store for 2nd order step
_a : int = derivative
_a : List[str] = dt
_a : Union[str, Any] = sample
else:
# 2. 2nd order / Heun's method
_a : Dict = (sample - pred_original_sample) / sigma_next
_a : Tuple = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_a : Optional[Any] = self.dt
_a : Union[str, Any] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_a : List[Any] = None
_a : Union[str, Any] = None
_a : Dict = None
_a : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __lowercase ( self : Optional[int] ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,):
'''simple docstring'''
_a : str = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
_a : Dict = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_a : Optional[Any] = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_a : int = self.timesteps.to(original_samples.device )
_a : Optional[Any] = timesteps.to(original_samples.device )
_a : Any = [self.index_for_timestep(_a ,_a ) for t in timesteps]
_a : Optional[int] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a : Optional[Any] = sigma.unsqueeze(-1 )
_a : Any = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 5 | 1 |
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowerCAmelCase = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__lowerCAmelCase = concatenate_datasets
__lowerCAmelCase = DownloadConfig
__lowerCAmelCase = DownloadManager
__lowerCAmelCase = DownloadMode
__lowerCAmelCase = DownloadConfig
__lowerCAmelCase = DownloadMode
__lowerCAmelCase = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 5 |
'''simple docstring'''
import qiskit
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
_a : Any = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_a : List[Any] = qiskit.QuantumCircuit(__a , __a )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
_a : Tuple = qiskit.execute(__a , __a , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(__a )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 5 | 1 |
'''simple docstring'''
__lowerCAmelCase = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 5 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_a : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_a : List[str] = 'xvjiarui/stable-diffusion-2-inpainting'
_a, _a : str = FlaxStableDiffusionInpaintPipeline.from_pretrained(_a ,safety_checker=_a )
_a : str = 'Face of a yellow cat, high resolution, sitting on a park bench'
_a : int = jax.random.PRNGKey(0 )
_a : Tuple = 50
_a : Any = jax.device_count()
_a : Dict = num_samples * [prompt]
_a : Optional[Any] = num_samples * [init_image]
_a : str = num_samples * [mask_image]
_a, _a, _a : Optional[Any] = pipeline.prepare_inputs(_a ,_a ,_a )
# shard inputs and rng
_a : Optional[Any] = replicate(_a )
_a : str = jax.random.split(_a ,jax.device_count() )
_a : Dict = shard(_a )
_a : int = shard(_a )
_a : int = shard(_a )
_a : Union[str, Any] = pipeline(
_a ,_a ,_a ,_a ,_a ,_a ,jit=_a )
_a : Union[str, Any] = output.images.reshape(_a ,512 ,512 ,3 )
_a : Union[str, Any] = images[0, 253:256, 253:256, -1]
_a : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_a : Union[str, Any] = jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 5 | 1 |
'''simple docstring'''
__lowerCAmelCase = 8.314_462 # Unit - J mol-1 K-1
def UpperCAmelCase_ (__a : float , __a : float , __a : float ):
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('Invalid inputs. Enter positive value.' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def UpperCAmelCase_ (__a : float , __a : float , __a : float ):
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('Invalid inputs. Enter positive value.' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 5 |
'''simple docstring'''
def UpperCAmelCase_ (__a : str , __a : str ):
"""simple docstring"""
_a : int = len(__a ) + 1
_a : List[str] = len(__a ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_a : Optional[int] = [[0 for i in range(__a )] for j in range(__a )]
# since string of zero length match pattern of zero length
_a : str = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , __a ):
_a : Optional[Any] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , __a ):
_a : Dict = dp[0][j - 2] if pattern[j - 1] == '*' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , __a ):
for j in range(1 , __a ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_a : Tuple = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
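                # '*' matches zero occurrences via dp[i][j - 2], or one more occurrence via dp[i - 1][j].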
if dp[i][j - 2] == 1:
_a : List[str] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_a : int = dp[i - 1][j]
else:
_a : Any = 0
else:
_a : Optional[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__lowerCAmelCase = """aab"""
__lowerCAmelCase = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 5 | 1 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
__lowerCAmelCase = None
try:
import msvcrt
except ImportError:
__lowerCAmelCase = None
try:
import fcntl
except ImportError:
__lowerCAmelCase = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
__lowerCAmelCase = OSError
# Data
# ------------------------------------------------
__lowerCAmelCase = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
__lowerCAmelCase = """3.0.12"""
__lowerCAmelCase = None
def UpperCAmelCase_ ():
"""simple docstring"""
global _logger
_a : Dict = _logger or logging.getLogger(__name__ )
return _logger
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Any ,_a : Optional[Any] ):
'''simple docstring'''
_a : Dict = lock_file
return None
def __str__( self : Any ):
'''simple docstring'''
_a : List[Any] = F"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[int] ,_a : List[str] ):
'''simple docstring'''
_a : List[str] = lock
return None
def __enter__( self : Union[str, Any] ):
'''simple docstring'''
return self.lock
def __exit__( self : Dict ,_a : List[Any] ,_a : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
self.lock.release()
return None
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Union[str, Any] ,_a : Optional[Any] ,_a : int=-1 ,_a : str=None ):
'''simple docstring'''
_a : List[Any] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
_a : Dict = self.hash_filename_if_too_long(_a ,_a )
# The path to the lock file.
_a : List[Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None if the object currently holds the
# lock.
_a : List[str] = None
# The default timeout value.
_a : List[str] = timeout
# We use this lock primarily for the lock counter.
_a : int = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
_a : str = 0
return None
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return self._lock_file
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return self._timeout
@timeout.setter
def __lowercase ( self : List[Any] ,_a : Any ):
'''simple docstring'''
_a : Union[str, Any] = float(_a )
return None
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
raise NotImplementedError()
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
raise NotImplementedError()
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return self._lock_file_fd is not None
def __lowercase ( self : str ,_a : Optional[int]=None ,_a : Optional[Any]=0.05 ):
'''simple docstring'''
if timeout is None:
_a : str = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
_a : str = id(self )
_a : int = self._lock_file
_a : Dict = time.time()
try:
while True:
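# poll until the OS-level lock is acquired or the timeout elapses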
with self._thread_lock:
if not self.is_locked:
logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(_a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
_a : Tuple = max(0 ,self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def __lowercase ( self : Optional[int] ,_a : Tuple=False ):
'''simple docstring'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
_a : int = id(self )
_a : Union[str, Any] = self._lock_file
logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
_a : Optional[Any] = 0
logger().debug(F"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__( self : List[Any] ):
'''simple docstring'''
self.acquire()
return self
def __exit__( self : List[str] ,_a : str ,_a : Any ,_a : Any ):
'''simple docstring'''
self.release()
return None
def __del__( self : int ):
'''simple docstring'''
self.release(force=_a )
return None
def __lowercase ( self : Optional[int] ,_a : str ,_a : int ):
'''simple docstring'''
_a : Any = os.path.basename(_a )
if len(_a ) > max_length and max_length > 0:
_a : List[str] = os.path.dirname(_a )
_a : Optional[Any] = str(hash(_a ) )
_a : List[str] = filename[: max_length - len(_a ) - 8] + '...' + hashed_filename + '.lock'
return os.path.join(_a ,_a )
else:
return path
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : int ,_a : Optional[Any] ,_a : List[str]=-1 ,_a : int=None ):
'''simple docstring'''
from .file_utils import relative_to_absolute_path
super().__init__(_a ,timeout=_a ,max_filename_length=_a )
_a : Any = '\\\\?\\' + relative_to_absolute_path(self.lock_file )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_a : int = os.open(self._lock_file ,_a )
except OSError:
pass
else:
try:
msvcrt.locking(_a ,msvcrt.LK_NBLCK ,1 )
except OSError:
os.close(_a )
else:
_a : List[Any] = fd
return None
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Optional[int] = self._lock_file_fd
_a : Any = None
msvcrt.locking(_a ,msvcrt.LK_UNLCK ,1 )
os.close(_a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : int ,_a : Union[str, Any] ,_a : List[str]=-1 ,_a : Any=None ):
'''simple docstring'''
_a : Optional[int] = os.statvfs(os.path.dirname(_a ) ).f_namemax
super().__init__(_a ,timeout=_a ,max_filename_length=_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
_a : Optional[Any] = os.open(self._lock_file ,_a )
try:
fcntl.flock(_a ,fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(_a )
else:
_a : Tuple = fd
return None
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[int] = self._lock_file_fd
_a : Union[str, Any] = None
fcntl.flock(_a ,fcntl.LOCK_UN )
os.close(_a )
return None
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Dict = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_a : List[Any] = os.open(self._lock_file ,_a )
except OSError:
pass
else:
_a : List[str] = fd
return None
def __lowercase ( self : int ):
'''simple docstring'''
os.close(self._lock_file_fd )
_a : Tuple = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
__lowerCAmelCase = None
if msvcrt:
__lowerCAmelCase = WindowsFileLock
elif fcntl:
__lowerCAmelCase = UnixFileLock
else:
__lowerCAmelCase = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
| 5 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = BlenderbotSmallTokenizer
__UpperCAmelCase : Tuple = False
def __lowercase ( self : List[Any] ):
'''simple docstring'''
super().setUp()
_a : List[str] = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
_a : Tuple = dict(zip(_a ,range(len(_a ) ) ) )
_a : List[Any] = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
_a : List[Any] = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
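# write a toy vocabulary and BPE merges file for the tokenizer under test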
_a : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_a : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(_a ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(_a ) )
def __lowercase ( self : List[Any] ,**_a : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : Tuple ,_a : int ):
'''simple docstring'''
_a : Optional[Any] = 'adapt act apte'
_a : Dict = 'adapt act apte'
return input_text, output_text
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
_a : Union[str, Any] = 'adapt act apte'
_a : Dict = ['adapt', 'act', 'ap@@', 'te']
_a : Tuple = tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
_a : List[str] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_a : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : str = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
_a : Union[str, Any] = 'I am a small frog.'
_a : int = tok([src_text] ,padding=_a ,truncation=_a )['input_ids']
_a : str = tok.batch_decode(_a ,skip_special_tokens=_a ,clean_up_tokenization_spaces=_a )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
_a : Union[str, Any] = 'I am a small frog .'
_a : Optional[Any] = '.'
_a : Optional[Any] = tok(_a )['input_ids']
_a : Union[str, Any] = tok(_a )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 5 | 1 |
'''simple docstring'''
import math
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if not isinstance(__a , __a ):
_a : Tuple = f"""Input value of [number={number}] must be an integer"""
raise TypeError(__a )
if number < 1:
_a : Any = f"""Input value of [number={number}] must be > 0"""
raise ValueError(__a )
elif number == 1:
return 3
elif number == 2:
return 5
else:
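# Proth numbers have the form k * 2^n + 1 with odd k < 2^n;
# generate them block by block until the requested index is reached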
_a : str = int(math.log(number // 3 , 2 ) ) + 2
_a : Union[str, Any] = [3, 5]
_a : Any = 2
_a : List[Any] = 3
for block in range(1 , __a ):
for _ in range(__a ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
__lowerCAmelCase = 0
try:
__lowerCAmelCase = proth(number)
except ValueError:
print(f'''ValueError: there is no {number}th Proth number''')
continue
print(f'''The {number}th Proth number: {value}''')
| 5 |
'''simple docstring'''
__lowerCAmelCase = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
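# invert the table so each Morse sequence maps back to its character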
__lowerCAmelCase = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[str] = 'Morse code here!'
print(__a )
_a : Tuple = encrypt(__a )
print(__a )
_a : str = decrypt(__a )
print(__a )
if __name__ == "__main__":
main()
| 5 | 1 |
'''simple docstring'''
import sys
def UpperCAmelCase_ (__a : List[str] ):
"""simple docstring"""
_a : List[str] = len(__a )
_a : Dict = [[0 for x in range(__a )] for x in range(__a )]
_a : Union[str, Any] = [[0 for x in range(__a )] for x in range(__a )]
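# matrix[a][b] holds the minimum number of scalar multiplications needed for
# the chain a..b; sol[a][b] records the split point that achieves it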
for chain_length in range(2 , __a ):
for a in range(1 , n - chain_length + 1 ):
_a : Tuple = a + chain_length - 1
_a : Any = sys.maxsize
for c in range(__a , __a ):
_a : Optional[Any] = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
_a : Dict = cost
_a : Any = c
return matrix, sol
def UpperCAmelCase_ (__a : Tuple , __a : List[str] , __a : Dict ):
"""simple docstring"""
if i == j:
print('A' + str(__a ) , end=' ' )
else:
print('(' , end=' ' )
print_optimal_solution(__a , __a , optimal_solution[i][j] )
print_optimal_solution(__a , optimal_solution[i][j] + 1 , __a )
print(')' , end=' ' )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
_a : Any = len(__a )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
_a, _a : Union[str, Any] = matrix_chain_order(__a )
print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
print_optimal_solution(__a , 1 , n - 1 )
if __name__ == "__main__":
main()
| 5 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-config' )
except HTTPError:
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('test-config' ,use_auth_token=self._token )
_a : Optional[Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a ,repo_id='test-config' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Tuple = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token )
_a : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='valid_org/test-config-org' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
_a : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'} )
_a : int = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" ,trust_remote_code=_a )
# Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,'CustomConfig' )
self.assertEqual(new_config.attribute ,42 )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_a : int = c.n_embd + 1 # int
_a : str = c.resid_pdrop + 1.0 # float
_a : Dict = not c.scale_attn_weights # bool
_a : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(_a ,c.n_embd ,'mismatch for key: n_embd' )
self.assertEqual(_a ,c.resid_pdrop ,'mismatch for key: resid_pdrop' )
self.assertEqual(_a ,c.scale_attn_weights ,'mismatch for key: scale_attn_weights' )
self.assertEqual(_a ,c.summary_type ,'mismatch for key: summary_type' )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : int = PretrainedConfig()
_a : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_a ,['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_a : Dict = [key for key, value in config_common_kwargs.items() if value == getattr(_a ,_a )]
if len(_a ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs`; pick another value for them:'
F""" {', '.join(_a )}.""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaises(_a ):
# config is in subfolder, the following should not work without specifying the subfolder
_a : List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_a : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ,subfolder='bert' )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
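# build a mock Hub response that fails with HTTP 500 so the patched request below is served from the cache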
_a : List[Any] = mock.Mock()
_a : Any = 500
_a : Any = {}
_a : Any = HTTPError
_a : List[Any] = {}
# Download this model to make sure it's in the cache.
_a : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' ,return_value=_a ) as mock_head:
_a : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that we did call the fake head request
mock_head.assert_called()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = AutoConfig.from_pretrained('bert-base-cased' )
_a : List[str] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_a )
_a : str = 2
json.dump(configuration.to_dict() ,open(os.path.join(_a ,'config.4.0.0.json' ) ,'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_a : Tuple = ['config.42.0.0.json']
_a : int = 768
configuration.save_pretrained(_a )
shutil.move(os.path.join(_a ,'config.4.0.0.json' ) ,os.path.join(_a ,'config.42.0.0.json' ) )
_a : int = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size ,768 )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
_a : Optional[int] = 'v4.0.0'
_a, _a : Tuple = new_transformers.models.auto.AutoConfig.from_pretrained(
_a ,return_unused_kwargs=_a )
self.assertEqual(new_configuration.hidden_size ,2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_a ,{} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_a : str = 'v3.0.0'
_a : Optional[Any] = old_transformers.models.auto.AutoConfig.from_pretrained(_a )
self.assertEqual(old_configuration.hidden_size ,768 )
| 5 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[int] ,*_a : Optional[int] ,**_a : Tuple ):
'''simple docstring'''
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' ,_a ,)
super().__init__(*_a ,**_a )
| 5 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__lowerCAmelCase = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
__lowerCAmelCase = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def UpperCAmelCase_ (__a : Dict ):
"""simple docstring"""
_a : List[Any] = list(state_dict.keys() )
for name in state_dict_keys:
_a : List[Any] = state_dict.pop(__a )
# emb -> embedding
if name.startswith('emb.' ):
_a : List[str] = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
_a : Dict = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
_a : int = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , __a )
# ffn -> feed_forward
_a : str = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , __a )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
_a : Any = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
_a : int = name.replace('.time_mix_v' , '.time_mix_value' )
# time_mix_r -> time_mix_receptance and reshape
if name.endswith('.time_mix_r' ):
_a : Tuple = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
_a : Tuple = 'rwkv.' + name
_a : List[Any] = weight
return state_dict
def UpperCAmelCase_ (__a : Tuple , __a : Union[str, Any] , __a : List[str] , __a : str=None , __a : List[str]=None , __a : int=False , __a : int=None ):
"""simple docstring"""
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
_a : List[Any] = 5_0_2_7_7
_a : Optional[Any] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
_a : Optional[Any] = PreTrainedTokenizerFast(tokenizer_file=__a )
_a : List[Any] = len(__a )
tokenizer.save_pretrained(__a )
# 2. Build the config
_a : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_a : str = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
_a : str = RwkvConfig(
vocab_size=__a , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__a )
# 3. Download model file then convert state_dict
_a : Tuple = hf_hub_download(__a , __a )
_a : Optional[int] = torch.load(__a , map_location='cpu' )
_a : Dict = convert_state_dict(__a )
# 4. Split in shards and save
_a, _a : List[Any] = shard_checkpoint(__a )
for shard_file, shard in shards.items():
torch.save(__a , os.path.join(__a , __a ) )
if index is not None:
_a : Dict = os.path.join(__a , __a )
# Save the index as well
with open(__a , 'w' , encoding='utf-8' ) as f:
_a : List[Any] = json.dumps(__a , indent=2 , sort_keys=__a ) + '\n'
f.write(__a )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
'Cleaning up shards. This may fail with an OOM error; if this is the case, don\'t worry, you still have converted the model.' )
_a : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_a : Optional[Any] = torch.load(os.path.join(__a , __a ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__a , __a ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
_a : List[str] = AutoModelForCausalLM.from_pretrained(__a )
model.push_to_hub(__a , max_shard_size='2GB' )
tokenizer.push_to_hub(__a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
__lowerCAmelCase = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 5 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
__lowerCAmelCase = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def UpperCAmelCase_ (__a : Optional[Any] , __a : Any , __a : Optional[Any] , __a : Tuple , __a : Dict , __a : List[Any] ):
"""simple docstring"""
for attribute in key.split('.' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_a : Optional[int] = 'lm_head'
_a : Any = getattr(__a , __a )
if weight_type is not None:
_a : List[str] = getattr(__a , __a ).shape
else:
_a : Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_a : Optional[int] = value
elif weight_type == "weight_g":
_a : str = value
elif weight_type == "weight_v":
_a : int = value
elif weight_type == "bias":
_a : Optional[Any] = value
else:
_a : Any = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def UpperCAmelCase_ (__a : str , __a : List[Any] , __a : Any ):
"""simple docstring"""
_a : str = []
_a : int = fairseq_model.state_dict()
_a : str = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_a : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == 'group' , )
_a : List[str] = True
else:
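# rename transformer-block weights according to MAPPING and copy them into the HF model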
for key, mapped_key in MAPPING.items():
_a : List[str] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_a : Any = True
if "*" in mapped_key:
_a : Optional[int] = name.split(__a )[0].split('.' )[-2]
_a : int = mapped_key.replace('*' , __a )
if "weight_g" in name:
_a : Any = 'weight_g'
elif "weight_v" in name:
_a : Optional[Any] = 'weight_v'
elif "bias" in name:
_a : Any = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_a : Any = 'weight'
else:
_a : List[str] = None
set_recursively(__a , __a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCAmelCase_ (__a : List[Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Optional[int] ):
"""simple docstring"""
_a : Optional[Any] = full_name.split('conv_layers.' )[-1]
_a : str = name.split('.' )
_a : Tuple = int(items[0] )
_a : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_a : Tuple = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_a : Tuple = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_a : Tuple = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_a : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def UpperCAmelCase_ (__a : List[Any] , __a : int , __a : Dict=None , __a : Tuple=None , __a : List[Any]=True ):
"""simple docstring"""
if config_path is not None:
_a : Any = UniSpeechConfig.from_pretrained(__a )
else:
_a : Optional[int] = UniSpeechConfig()
if is_finetuned:
if dict_path:
_a : str = Dictionary.load_from_json(__a )
# important: change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_a : int = target_dict.pad_index
_a : Tuple = target_dict.bos_index
_a : List[str] = target_dict.eos_index
_a : Union[str, Any] = len(target_dict.symbols )
_a : Tuple = os.path.join(__a , 'vocab.json' )
if not os.path.isdir(__a ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
_a : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
_a : List[Any] = 4_2
_a : Union[str, Any] = 4_3
with open(__a , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(__a , __a )
_a : List[str] = WavaVecaPhonemeCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__a , )
_a : str = True if config.feat_extract_norm == 'layer' else False
_a : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
_a : Tuple = WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
_a : List[str] = UniSpeechForCTC(__a )
else:
_a : Any = UniSpeechForPreTraining(__a )
if is_finetuned:
_a, _a, _a : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} )
else:
_a, _a, _a : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_a : Union[str, Any] = model[0].eval()
recursively_load_weights(__a , __a , __a )
hf_unispeech.save_pretrained(__a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCAmelCase = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 5 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
__lowerCAmelCase = datasets.logging.get_logger(__name__)
__lowerCAmelCase = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'sources': datasets.Value('string' ,id='sequence' ),
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
] ,)
def __lowercase ( self : int ,_a : int ):
'''simple docstring'''
if self.config_name == "default":
_a : List[Any] = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
else:
_a : List[str] = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Dict ,_a : Optional[Any] ,_a : List[str]=None ,_a : Tuple=False ):
'''simple docstring'''
if gpus is None:
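# default to a single GPU when CUDA is available, otherwise run on CPU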
_a : str = 1 if torch.cuda.is_available() else 0
_a : Optional[Any] = {'src': sources, 'mt': predictions, 'ref': references}
_a : Optional[Any] = [dict(zip(_a ,_a ) ) for t in zip(*data.values() )]
_a, _a : Tuple = self.scorer.predict(_a ,gpus=_a ,progress_bar=_a )
return {"mean_score": mean_score, "scores": scores}
| 5 | 1 |