def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next

    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt

    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
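
# The functions above assume a minimal singly linked list node exposing `val`
# and `next`. The `ListNode` and `build_list` names below are ours, added only
# for demonstration; they are not part of the original module.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    """Build a linked list from an iterable and return its head (or None)."""
    head = tail = None
    for value in values:
        node = ListNode(value)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


if __name__ == "__main__":
    # is_palindrome rearranges the list in place (it splits it and reverses
    # the second half), so each check gets a freshly built list.
    print(is_palindrome(build_list([1, 2, 2, 1])))            # True
    print(is_palindrome_stack(build_list([1, 2, 3, 2, 1])))   # True
    print(is_palindrome_dict(build_list([1, 2, 3])))          # False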
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets


_CITATION = """\
@inproceedings{wang2019glue,
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
  note={In the Proceedings of ICLR.},
  year={2019}
}
"""

_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""

_KWARGS_DESCRIPTION = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "pearson": Pearson Correlation
    "spearmanr": Spearman Correlation
    "matthews_correlation": Matthew Correlation
Examples:
    >>> glue_metric = datasets.load_metric('glue', 'sst2')  # 'sst2' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'mrpc')  # 'mrpc' or 'qqp'
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'stsb')
    >>> references = [0., 1., 2., 3., 4., 5.]
    >>> predictions = [0., 1., 2., 3., 4., 5.]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
    {'pearson': 1.0, 'spearmanr': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'cola')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    """A vector of arbitrary size, backed by a list of components."""

    def __init__(self, components: Collection[float] | None = None):
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A matrix with fixed width and height, backed by a list of rows."""

    def __init__(self, matrix: list[list[float]], w: int, h: int):
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:  # error case
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
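
if __name__ == "__main__":
    # Small demonstration of the API above, added for illustration only
    # (not part of the library); the printed values follow directly from
    # the definitions.
    x = Vector([1, 2, 3])
    y = Vector([3, 2, 1])
    print(x + y)                 # (4,4,4)
    print(x * y)                 # dot product: 1*3 + 2*2 + 3*1 = 10
    print(x * 2.0)               # scalar multiplication: (2.0,4.0,6.0)
    print(x.euclidean_length())  # sqrt(1 + 4 + 9) ~= 3.7417

    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m * Vector([1, 1]))    # matrix-vector product: (3,7)
    print(m.determinant())       # 1*4 - 2*3 = -2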
import argparse
import glob
import logging
import os
import time
from argparse import Namespace

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors


logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False):
        # We evaluate on the dev set to compare to benchmarks without facing
        # test set submission or private test data
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )

        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )

        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
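# For contrast, an odd cycle is never bipartite: in a triangle the DFS
# two-coloring necessarily leaves some edge connecting same-colored vertices,
# so the final edge check fails. (This counterexample is ours, added for
# illustration.)
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False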
from __future__ import annotations

import unittest

from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFLEDForConditionalGeneration, TFLEDModel


@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }


@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # head masking during generation is not exercised for this model
        pass


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4


@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
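
if __name__ == "__main__":
    # Minimal usage sketch, added for illustration only: the toy model and
    # step counts are ours. A "linear" schedule warms the learning rate up
    # over 10 steps and then decays it linearly to 0 at step 100.
    import torch

    toy_model = torch.nn.Linear(4, 2)
    toy_optimizer = torch.optim.SGD(toy_model.parameters(), lr=0.1)
    toy_scheduler = get_scheduler("linear", toy_optimizer, num_warmup_steps=10, num_training_steps=100)

    for _ in range(100):
        toy_optimizer.step()
        toy_scheduler.step()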
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))

import dataclasses  # noqa
import io  # noqa
import itertools  # noqa
import json  # noqa
import os  # noqa
import unittest  # noqa
from copy import deepcopy  # noqa

from parameterized import parameterized  # noqa
from transformers import TrainingArguments, is_torch_available  # noqa
from transformers.deepspeed import is_deepspeed_available  # noqa
from transformers.file_utils import WEIGHTS_NAME  # noqa
from transformers.testing_utils import (  # noqa
    CaptureLogger,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import set_seed  # noqa


set_seed(42)

models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))


@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=False,
            fp16=False,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=True,
            fp16=False,
        )

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=False,
            fp16=True,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=True,
            fp16=True,
        )

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
    ):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available


_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
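
# Usage sketch (added for illustration; via the transformers package, where
# this module lives). The values follow from the defaults above:
#
#     from transformers import PegasusConfig
#
#     config = PegasusConfig()
#     config.model_type           # "pegasus"
#     config.hidden_size          # 1024 -- aliased to `d_model` by the property above
#     config.num_attention_heads  # 16   -- aliased to `encoder_attention_heads`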
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test with `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division so d stays an int
        exp += 1

    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
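
# `bin_exp_mod` is imported from a sibling module that is not shown here.
# The function below is a hypothetical stand-in we added to document the
# behavior this test relies on -- modular exponentiation, (a ** n) % b, by
# iterative binary exponentiation; the real module may be implemented
# differently (e.g. recursively).
def _bin_exp_mod_reference(a, n, b):
    """Return (a ** n) % b.

    >>> _bin_exp_mod_reference(3, 4, 5)
    1
    """
    result = 1
    a %= b
    while n > 0:
        if n % 2 == 1:
            result = (result * a) % b
        a = (a * a) % b
        n //= 2
    return result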
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # Perceiver works on utf-8 bytes, so not every single id is decodable
        # on its own; build a clean sequence from ids that survive a
        # decode/encode round trip.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowercase )
self.assertIn('attention_mask' , lowercase )
self.assertNotIn('decoder_input_ids' , lowercase )
self.assertNotIn('decoder_attention_mask' , lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = [
'Summary of the text.',
'Another summary.',
]
_snake_case = tokenizer(
text_target=lowercase , max_length=32 , padding='max_length' , truncation=lowercase , return_tensors=lowercase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
_snake_case = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_snake_case = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_snake_case = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
_snake_case = [f'''<extra_id_{i}>''' for i in range(125 )]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case = tokenizer_class.from_pretrained(
lowercase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowercase )]
_snake_case = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
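    # The overrides below skip base-class tests that assume a subword vocabulary;
    # Perceiver tokenizes raw UTF-8 bytes, so those checks presumably do not apply here.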
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : List[str] ):
'''simple docstring'''
pass
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : int ):
'''simple docstring'''
_snake_case = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
_snake_case = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(lowercase , lowercase ) | 686 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_lowerCamelCase : int = re.compile(r'''\s+''')
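# Each helper below computes one per-example statistic (hash, line stats, alphanumeric
# fraction, char/token ratio, autogeneration/config/test heuristics); `preprocess`
# merges them all so that `filter` can apply every quality check in a single pass.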
def a_ ( __lowercase : List[Any] ) -> int:
return {"hash": hashlib.mda(re.sub(__lowercase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def a_ ( __lowercase : List[Any] ) -> Dict:
_snake_case = [len(__lowercase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(__lowercase ), "line_max": max(__lowercase )}
def a_ ( __lowercase : Optional[int] ) -> List[str]:
_snake_case = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] ) -> Optional[int]:
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def a_ ( __lowercase : Union[str, Any] , __lowercase : int=5 ) -> Optional[Any]:
_snake_case = ['auto-generated', 'autogenerated', 'automatically generated']
_snake_case = example['content'].splitlines()
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a_ ( __lowercase : List[Any] , __lowercase : int=5 , __lowercase : Tuple=0.0_5 ) -> Union[str, Any]:
_snake_case = ['unit tests', 'test file', 'configuration file']
_snake_case = example['content'].splitlines()
_snake_case = 0
_snake_case = 0
# first test
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_snake_case = example['content'].count('\n' )
_snake_case = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a_ ( __lowercase : Union[str, Any] ) -> Any:
_snake_case = ['def ', 'class ', 'for ', 'while ']
_snake_case = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a_ ( __lowercase : Tuple , __lowercase : Any=4 ) -> List[str]:
_snake_case = example['content'].splitlines()
_snake_case = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a_ ( __lowercase : Dict ) -> Dict:
_snake_case = tokenizer(example['content'] , truncation=__lowercase )['input_ids']
_snake_case = len(example['content'] ) / len(__lowercase )
return {"ratio": ratio}
def a_ ( __lowercase : Optional[Any] ) -> Any:
_snake_case = {}
results.update(get_hash(__lowercase ) )
results.update(line_stats(__lowercase ) )
results.update(alpha_stats(__lowercase ) )
results.update(char_token_ratio(__lowercase ) )
results.update(is_autogenerated(__lowercase ) )
results.update(is_config_or_test(__lowercase ) )
results.update(has_no_keywords(__lowercase ) )
results.update(has_few_assignments(__lowercase ) )
return results
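# Keep an example only if its hash is unique and every heuristic passes; config/test
# files and keyword-free files are additionally dropped with probability `filter_proba`.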
def a_ ( __lowercase : Optional[int] , __lowercase : str , __lowercase : List[Any] ) -> int:
if not check_uniques(__lowercase , __lowercase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a_ ( __lowercase : Dict ) -> Dict:
with open(__lowercase , 'rb' ) as f_in:
with gzip.open(str(__lowercase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(__lowercase , __lowercase )
os.unlink(__lowercase )
# Settings
_lowerCamelCase : Dict = HfArgumentParser(PreprocessingArguments)
_lowerCamelCase : Dict = parser.parse_args()
if args.num_workers is None:
_lowerCamelCase : int = multiprocessing.cpu_count()
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_lowerCamelCase : Any = time.time()
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
_lowerCamelCase : Optional[int] = time.time()
_lowerCamelCase : Union[str, Any] = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
_lowerCamelCase : List[Any] = set(ds.unique('''hash'''))
_lowerCamelCase : Dict = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
_lowerCamelCase : List[Any] = time.time()
_lowerCamelCase : Optional[int] = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_lowerCamelCase : Union[str, Any] = time.time()
_lowerCamelCase , _lowerCamelCase : Dict = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
print(F'Size of deduplicated dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
_lowerCamelCase : Optional[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
_lowerCamelCase : int = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
_lowerCamelCase : Union[str, Any] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_lowerCamelCase : Dict = str(data_dir / F'file-{file_number+1:012}.json')
_lowerCamelCase : str = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}') | 686 | 1 |
from collections import defaultdict
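# "Even Tree" problem: dfs returns the size of the subtree rooted at `start`, and every
# even-sized subtree can be severed from its parent. Since the whole tree (rooted at
# node 1) is itself even-sized, the answer printed below is len(cuts) - 1.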
def a_ ( __lowercase : int ) -> int:
_snake_case = 1
_snake_case = True
for v in tree[start]:
if v not in visited:
ret += dfs(__lowercase )
if ret % 2 == 0:
cuts.append(__lowercase )
return ret
def a_ ( ) -> List[Any]:
dfs(1 )
if __name__ == "__main__":
_lowerCamelCase , _lowerCamelCase : int = 10, 9
_lowerCamelCase : List[str] = defaultdict(list)
_lowerCamelCase : dict[int, bool] = {}
_lowerCamelCase : list[int] = []
_lowerCamelCase : int = 0
_lowerCamelCase : List[str] = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1) | 686 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : int = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = "yolos"
def __init__( self : int , lowercase : List[str]=768 , lowercase : Tuple=12 , lowercase : int=12 , lowercase : int=3_072 , lowercase : Optional[int]="gelu" , lowercase : str=0.0 , lowercase : Optional[int]=0.0 , lowercase : Optional[Any]=0.02 , lowercase : List[str]=1E-12 , lowercase : Dict=[512, 864] , lowercase : Union[str, Any]=16 , lowercase : List[Any]=3 , lowercase : List[str]=True , lowercase : Optional[int]=100 , lowercase : int=True , lowercase : Dict=False , lowercase : str=1 , lowercase : int=5 , lowercase : Tuple=2 , lowercase : List[str]=5 , lowercase : Any=2 , lowercase : List[str]=0.1 , **lowercase : int , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = qkv_bias
_snake_case = num_detection_tokens
_snake_case = use_mid_position_embeddings
_snake_case = auxiliary_loss
# Hungarian matcher
_snake_case = class_cost
_snake_case = bbox_cost
_snake_case = giou_cost
# Loss coefficients
_snake_case = bbox_loss_coefficient
_snake_case = giou_loss_coefficient
_snake_case = eos_coefficient
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
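    # ONNX export interface: the input spec with its dynamic axes, the absolute tolerance
    # used when validating exported outputs, and the default opset version, respectively.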
@property
def A ( self : str ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Any ):
'''simple docstring'''
return 1E-4
@property
def A ( self : List[Any] ):
'''simple docstring'''
return 12 | 686 | 1 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
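# The session-scoped fixtures below materialize the same small toy dataset in many
# on-disk formats (text, csv, json/jsonl, parquet, sqlite) and in a range of compression
# and archive containers, so loader tests can be parametrized over all of them.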
@pytest.fixture(scope='session' )
def a_ ( ) -> Dict:
_snake_case = 10
_snake_case = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
_snake_case = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(__lowercase ) ),
} , features=__lowercase , )
return dataset
@pytest.fixture(scope='session' )
def a_ ( __lowercase : List[str] , __lowercase : Any ) -> Dict:
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=__lowercase )
return filename
# FILE_CONTENT + files
_lowerCamelCase : List[Any] = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Any ) -> Any:
_snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt'
_snake_case = FILE_CONTENT
with open(__lowercase , 'w' ) as f:
f.write(__lowercase )
return filename
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Dict ) -> List[str]:
import bza
_snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
_snake_case = bytes(__lowercase , 'utf-8' )
with bza.open(__lowercase , 'wb' ) as f:
f.write(__lowercase )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : int ) -> Optional[Any]:
import gzip
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
_snake_case = bytes(__lowercase , 'utf-8' )
with gzip.open(__lowercase , 'wb' ) as f:
f.write(__lowercase )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : List[str] ) -> Union[str, Any]:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
_snake_case = bytes(__lowercase , 'utf-8' )
with lza.frame.open(__lowercase , 'wb' ) as f:
f.write(__lowercase )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : List[str] , __lowercase : Tuple ) -> Any:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(__lowercase , 'w' ) as archive:
archive.write(__lowercase , arcname=os.path.basename(__lowercase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : List[str] , __lowercase : List[str] ) -> Optional[Any]:
import tarfile
_snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__lowercase , 'w' ) as f:
f.add(__lowercase , arcname=os.path.basename(__lowercase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : List[Any] ) -> Union[str, Any]:
import lzma
_snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
_snake_case = bytes(__lowercase , 'utf-8' )
with lzma.open(__lowercase , 'wb' ) as f:
f.write(__lowercase )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Any , __lowercase : str ) -> Dict:
import zipfile
_snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__lowercase , 'w' ) as f:
f.write(__lowercase , arcname=os.path.basename(__lowercase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Optional[int] ) -> List[Any]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
_snake_case = bytes(__lowercase , 'utf-8' )
with zstd.open(__lowercase , 'wb' ) as f:
f.write(__lowercase )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : List[Any] ) -> str:
_snake_case = tmp_path_factory.mktemp('data' ) / 'file.xml'
    _snake_case = textwrap.dedent(
        """\
        <?xml version="1.0" encoding="UTF-8" ?>
        <tmx version="1.4">
          <header segtype="sentence" srclang="ca" />
          <body>
            <tu>
              <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
              <tuv xml:lang="en"><seg>Content 1</seg></tuv>
            </tu>
            <tu>
              <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
              <tuv xml:lang="en"><seg>Content 2</seg></tuv>
            </tu>
            <tu>
              <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
              <tuv xml:lang="en"><seg>Content 3</seg></tuv>
            </tu>
            <tu>
              <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
              <tuv xml:lang="en"><seg>Content 4</seg></tuv>
            </tu>
            <tu>
              <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
              <tuv xml:lang="en"><seg>Content 5</seg></tuv>
            </tu>
          </body>
        </tmx>""" )
with open(__lowercase , 'w' ) as f:
f.write(__lowercase )
return filename
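# Shared toy rows reused by the file fixtures below; col_1/col_2/col_3 exercise the
# string, int64 and float64 column types respectively.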
_lowerCamelCase : List[str] = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
_lowerCamelCase : str = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
_lowerCamelCase : Tuple = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
_lowerCamelCase : str = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
_lowerCamelCase : str = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope='session' )
def a_ ( ) -> int:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def a_ ( __lowercase : List[Any] ) -> List[str]:
_snake_case = datasets.Dataset.from_dict(__lowercase )
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__lowercase )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Union[str, Any] ) -> Dict:
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(__lowercase ) ) as con:
_snake_case = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : str ) -> Dict:
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__lowercase , 'w' , newline='' ) as f:
_snake_case = csv.DictWriter(__lowercase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__lowercase )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Optional[Any] ) -> Optional[Any]:
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__lowercase , 'w' , newline='' ) as f:
_snake_case = csv.DictWriter(__lowercase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__lowercase )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Dict , __lowercase : Dict ) -> Dict:
import bza
_snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__lowercase , 'rb' ) as f:
_snake_case = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__lowercase , 'wb' ) as f:
f.write(__lowercase )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Union[str, Any] , __lowercase : str , __lowercase : Dict ) -> Optional[int]:
_snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__lowercase , 'w' ) as f:
f.write(__lowercase , arcname=os.path.basename(__lowercase ) )
f.write(__lowercase , arcname=os.path.basename(__lowercase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Dict , __lowercase : Optional[Any] , __lowercase : Any ) -> Optional[int]:
_snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__lowercase , 'w' ) as f:
f.write(__lowercase , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__lowercase , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : int , __lowercase : Optional[Any] , __lowercase : str ) -> List[Any]:
_snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__lowercase , 'w' ) as f:
f.write(__lowercase , arcname=os.path.join('main_dir' , os.path.basename(__lowercase ) ) )
f.write(__lowercase , arcname=os.path.join('main_dir' , os.path.basename(__lowercase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Optional[int] ) -> Optional[int]:
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
_snake_case = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(__lowercase , 'wb' ) as f:
_snake_case = pq.ParquetWriter(__lowercase , schema=__lowercase )
_snake_case = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__lowercase ) )] for k in DATA[0]} , schema=__lowercase )
writer.write_table(__lowercase )
writer.close()
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : List[str] ) -> Optional[int]:
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_snake_case = {'data': DATA}
with open(__lowercase , 'w' ) as f:
json.dump(__lowercase , __lowercase )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : str ) -> List[str]:
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_snake_case = {'data': DATA_DICT_OF_LISTS}
with open(__lowercase , 'w' ) as f:
json.dump(__lowercase , __lowercase )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : int ) -> List[str]:
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__lowercase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__lowercase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : str ) -> Tuple:
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__lowercase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__lowercase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Dict ) -> List[Any]:
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__lowercase , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__lowercase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : List[Any] ) -> List[Any]:
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__lowercase , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__lowercase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : List[Any] , __lowercase : str ) -> List[Any]:
import gzip
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__lowercase , 'rb' ) as orig_file:
with gzip.open(__lowercase , 'wb' ) as zipped_file:
zipped_file.writelines(__lowercase )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Any , __lowercase : Dict ) -> List[Any]:
import gzip
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__lowercase , 'rb' ) as orig_file:
with gzip.open(__lowercase , 'wb' ) as zipped_file:
zipped_file.writelines(__lowercase )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : int , __lowercase : Dict , __lowercase : Any ) -> Optional[int]:
_snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__lowercase , 'w' ) as f:
f.write(__lowercase , arcname=os.path.basename(__lowercase ) )
f.write(__lowercase , arcname=os.path.basename(__lowercase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : int , __lowercase : List[str] , __lowercase : Tuple , __lowercase : int ) -> Any:
_snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__lowercase , 'w' ) as f:
f.write(__lowercase , arcname=os.path.join('nested' , os.path.basename(__lowercase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Union[str, Any] , __lowercase : List[str] , __lowercase : Union[str, Any] ) -> Dict:
_snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__lowercase , 'w' ) as f:
f.write(__lowercase , arcname=os.path.join('main_dir' , os.path.basename(__lowercase ) ) )
f.write(__lowercase , arcname=os.path.join('main_dir' , os.path.basename(__lowercase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Union[str, Any] , __lowercase : Tuple , __lowercase : int ) -> Any:
_snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__lowercase , 'w' ) as f:
f.add(__lowercase , arcname=os.path.basename(__lowercase ) )
f.add(__lowercase , arcname=os.path.basename(__lowercase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : int , __lowercase : Union[str, Any] , __lowercase : List[Any] , __lowercase : Any ) -> str:
_snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__lowercase , 'w' ) as f:
f.add(__lowercase , arcname=os.path.join('nested' , os.path.basename(__lowercase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : int ) -> Any:
_snake_case = ['0', '1', '2', '3']
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__lowercase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Tuple ) -> Tuple:
_snake_case = ['0', '1', '2', '3']
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__lowercase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Dict ) -> Optional[Any]:
_snake_case = ['0', '1', '2', '3']
_snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__lowercase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Tuple , __lowercase : List[Any] , __lowercase : List[Any] ) -> List[Any]:
_snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__lowercase , 'w' ) as f:
f.write(__lowercase , arcname=os.path.basename(__lowercase ) )
f.write(__lowercase , arcname=os.path.basename(__lowercase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Optional[int] ) -> int:
_snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__lowercase , 'w' ) as f:
f.write(__lowercase , arcname=os.path.join('main_dir' , os.path.basename(__lowercase ) ) )
f.write(__lowercase , arcname=os.path.join('main_dir' , os.path.basename(__lowercase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : str , __lowercase : Any , __lowercase : Optional[Any] ) -> List[Any]:
_snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__lowercase , 'w' ) as f:
f.write(__lowercase , arcname=os.path.basename('unsupported.ext' ) )
f.write(__lowercase , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : int ) -> Any:
_snake_case = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
_snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__lowercase , 'w' , encoding='utf-8' ) as f:
f.write(__lowercase )
return path
@pytest.fixture(scope='session' )
def a_ ( ) -> Dict:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def a_ ( ) -> int:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def a_ ( __lowercase : str , __lowercase : str ) -> Dict:
_snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__lowercase , 'w' ) as f:
f.write(__lowercase , arcname=os.path.basename(__lowercase ) )
f.write(__lowercase , arcname=os.path.basename(__lowercase ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def a_ ( __lowercase : Union[str, Any] ) -> int:
_snake_case = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir | 686 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_lowerCamelCase : Tuple = logging.get_logger(__name__)
# General docstring
_lowerCamelCase : Union[str, Any] = '''ResNetConfig'''
# Base docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[Any] = [1, 2_048, 7, 7]
# Image classification docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[int] = '''tiger cat'''
_lowerCamelCase : str = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 3 , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(
lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=kernel_size // 2 , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def A ( self : Union[str, Any] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def A ( self : Tuple , lowercase : Tensor ):
'''simple docstring'''
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_snake_case = self.embedder(lowercase )
_snake_case = self.pooler(lowercase )
return embedding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase : int , lowercase : int , lowercase : int = 2 ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(lowercase , lowercase , kernel_size=1 , stride=lowercase , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
return hidden_state
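# Residual blocks: y = activation(F(x) + shortcut(x)); the shortcut is a strided 1x1
# projection whenever the spatial size or channel count changes, identity otherwise.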
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : List[str] , lowercase : List[str] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" , lowercase : int = 4 ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , kernel_size=1 ) , ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , kernel_size=1 , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : Dict , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
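# A stage stacks `depth` residual layers of one type; only its first layer downsamples
# (stride 2 by default), matching the reference ResNet design.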
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowercase : ResNetConfig , lowercase : int , lowercase : int , lowercase : int = 2 , lowercase : int = 2 , ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , stride=lowercase , activation=config.hidden_act ) , *[layer(lowercase , lowercase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList([] )
        # depending on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase , config.depths[1:] ):
self.stages.append(ResNetStage(lowercase , lowercase , lowercase , depth=lowercase ) )
def A ( self : str , lowercase : Tensor , lowercase : bool = False , lowercase : bool = True ):
'''simple docstring'''
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowercase )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowercase , hidden_states=lowercase , )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ResNetConfig
_UpperCAmelCase : Tuple = "resnet"
_UpperCAmelCase : Optional[Any] = "pixel_values"
_UpperCAmelCase : Dict = True
def A ( self : List[str] , lowercase : Dict ):
'''simple docstring'''
if isinstance(lowercase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowercase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def A ( self : Tuple , lowercase : List[Any] , lowercase : Optional[Any]=False ):
'''simple docstring'''
if isinstance(lowercase , lowercase ):
_snake_case = value
_lowerCamelCase : str = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCamelCase : int = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : Any ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
_snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self : Union[str, Any] , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowercase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase : int ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowercase )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self : Union[str, Any] , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[torch.LongTensor] = None , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowercase )
_snake_case = None
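        # Standard classification-head logic: infer the problem type from the labels when
        # it is unset, then apply MSE (regression), cross-entropy (single-label) or
        # BCE-with-logits (multi-label) accordingly.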
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowercase , lowercase )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowercase , lowercase )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowercase : Union[str, Any] ):
'''simple docstring'''
super().__init__(lowercase )
super()._init_backbone(lowercase )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@replace_return_docstrings(output_type=lowercase , config_class=_CONFIG_FOR_DOC )
def A ( self : Dict , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowercase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowercase , ) | 686 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
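# Shared assertion helper for Dataset readers; a DatasetDict counterpart appears below.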
def a_ ( __lowercase : List[Any] , __lowercase : Optional[int] ) -> Tuple:
assert isinstance(__lowercase , __lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def a_ ( __lowercase : Optional[Any] , __lowercase : List[Any] , __lowercase : Union[str, Any] ) -> Union[str, Any]:
_snake_case = tmp_path / 'cache'
_snake_case = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case = ParquetDatasetReader(__lowercase , cache_dir=__lowercase , keep_in_memory=__lowercase ).read()
_check_parquet_dataset(__lowercase , __lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def a_ ( __lowercase : Optional[Any] , __lowercase : int , __lowercase : List[str] ) -> List[str]:
_snake_case = tmp_path / 'cache'
_snake_case = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_snake_case = features.copy() if features else default_expected_features
_snake_case = (
Features({feature: Value(__lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case = ParquetDatasetReader(__lowercase , features=__lowercase , cache_dir=__lowercase ).read()
_check_parquet_dataset(__lowercase , __lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def a_ ( __lowercase : Tuple , __lowercase : List[str] , __lowercase : List[str] ) -> Optional[int]:
_snake_case = tmp_path / 'cache'
_snake_case = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_snake_case = ParquetDatasetReader(__lowercase , cache_dir=__lowercase , split=__lowercase ).read()
_check_parquet_dataset(__lowercase , __lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def a_ ( __lowercase : str , __lowercase : Tuple , __lowercase : List[str] ) -> Tuple:
if issubclass(__lowercase , __lowercase ):
_snake_case = parquet_path
elif issubclass(__lowercase , __lowercase ):
_snake_case = [parquet_path]
_snake_case = tmp_path / 'cache'
_snake_case = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_snake_case = ParquetDatasetReader(__lowercase , cache_dir=__lowercase ).read()
_check_parquet_dataset(__lowercase , __lowercase )
def a_ ( __lowercase : Union[str, Any] , __lowercase : str , __lowercase : Tuple=("train",) ) -> int:
assert isinstance(__lowercase , __lowercase )
for split in splits:
_snake_case = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def a_ ( __lowercase : str , __lowercase : Dict , __lowercase : str ) -> Any:
_snake_case = tmp_path / 'cache'
_snake_case = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=__lowercase , keep_in_memory=__lowercase ).read()
_check_parquet_datasetdict(__lowercase , __lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def a_ ( __lowercase : List[str] , __lowercase : Tuple , __lowercase : List[str] ) -> Any:
_snake_case = tmp_path / 'cache'
_snake_case = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_snake_case = features.copy() if features else default_expected_features
_snake_case = (
Features({feature: Value(__lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case = ParquetDatasetReader({'train': parquet_path} , features=__lowercase , cache_dir=__lowercase ).read()
_check_parquet_datasetdict(__lowercase , __lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def a_ ( __lowercase : Optional[int] , __lowercase : str , __lowercase : Optional[int] ) -> List[str]:
if split:
_snake_case = {split: parquet_path}
else:
_snake_case = 'train'
_snake_case = {'train': parquet_path, 'test': parquet_path}
_snake_case = tmp_path / 'cache'
_snake_case = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_snake_case = ParquetDatasetReader(__lowercase , cache_dir=__lowercase ).read()
_check_parquet_datasetdict(__lowercase , __lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / 'foo.parquet')
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / 'foo.parquet')
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / 'test_image_rgb.jpg')
    data = {'image': [image_path]}
    features = Features({'image': Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / 'foo.parquet')
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / 'foo.parquet'))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / 'foo.parquet'), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    'feature, expected',
    [
        (Features({'foo': Value('int32')}), None),
        (Features({'image': Image(), 'foo': Value('int32')}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({'nested': Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
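# Minimal sketch of the `parquet_path` fixture the tests above rely on (the
# real fixture lives in the suite's conftest.py; names and values here are
# illustrative but match the 4-row, 3-column assertions):
import pyarrow as pa
import pyarrow.parquet as pq
import pytest


@pytest.fixture
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'file.parquet')
    table = pa.Table.from_pydict(
        {'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0]}
    )
    pq.write_table(table, path)  # 4 rows, 3 columns, as asserted in _check_parquet_datasetdict
    return path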
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
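# Usage sketch (illustrative): with the lazy module installed in sys.modules,
# importing the config class stays cheap, while touching a model class triggers
# the deferred torch-dependent import on first attribute access:
#
#   from transformers.models.focalnet import FocalNetConfig  # no torch import yet
#   from transformers.models.focalnet import FocalNetModel   # imports modeling_focalnet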
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'bert_for_seq_generation': (
            'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'bert_for_seq_generation': 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    '''Tokenizer for BertGeneration, backed by SentencePiece.'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', unk_token='<unk>', pad_token='<pad>', sep_token='<::::>', sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
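# Usage sketch (illustrative; downloads the SentencePiece model on first use):
#
#   tokenizer = BertGenerationTokenizer.from_pretrained(
#       'google/bert_for_seq_generation_L-24_bbc_encoder'
#   )
#   ids = tokenizer('Hello world')['input_ids']
#   print(tokenizer.decode(ids))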
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=['stage2', 'stage3', 'stage4'],
    )
    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )
    # set labels
    repo_id = 'huggingface/label-files'
    if 'o365' in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
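# Illustrative sanity check (not part of the original script): the fused qkv
# projection sliced above has width 3*dim, and the query/key/value thirds are
# exactly the [:dim], [dim : dim * 2] and [-dim:] slices used in read_in_swin_q_k_v.
def _demo_qkv_split(dim=4):
    qkv = torch.arange(3 * dim)
    q, k, v = qkv[:dim], qkv[dim : dim * 2], qkv[-dim:]
    assert torch.equal(torch.cat([q, k, v]), qkv)
    return q, k, v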
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == 'deta-swin-large':
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints', filename='adet_swin_ft.pth')
    elif model_name == 'deta-swin-large-o365':
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365', filename='deta_swin_pt_o365.pth')
    else:
        raise ValueError(f'Model name {model_name} not supported')
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if 'transformer.decoder.class_embed' in key or 'transformer.decoder.bbox_embed' in key:
            val = state_dict.pop(key)
            state_dict[key.replace('transformer.decoder', 'model.decoder')] = val
        if 'input_proj' in key:
            val = state_dict.pop(key)
            state_dict['model.' + key] = val
        if 'level_embed' in key or 'pos_trans' in key or 'pix_trans' in key or 'enc_output' in key:
            val = state_dict.pop(key)
            state_dict[key.replace('transformer', 'model')] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format='coco_detection')
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device))
    # verify logits
    print('Logits:', outputs.logits[0, :3, :3])
    print('Boxes:', outputs.pred_boxes[0, :3, :3])
    if model_name == 'deta-swin-large':
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == 'deta-swin-large-o365':
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print('Everything ok!')
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print('Pushing model and processor to hub...')
        model.push_to_hub(f'jozhang97/{model_name}')
        processor.push_to_hub(f'jozhang97/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
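# Example invocation (the script file name is hypothetical; the flags match the
# argument parser above):
#
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub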
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'SCUT-DLVCLab/lilt-roberta-en-base': (
        'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
    ),
}
class LiltConfig(PretrainedConfig):
    model_type = 'lilt'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', classifier_dropout=None, channel_shrink_ratio=4, max_2d_position_embeddings=1024, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
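# Minimal usage sketch (illustrative): instantiate a default configuration and
# override the 2D position embedding budget.
#
#   config = LiltConfig(max_2d_position_embeddings=2048)
#   print(config.model_type, config.hidden_size)  # lilt 768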
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # The common tests assume every id is decodable on its own; that does not hold for
        # Perceiver's byte-level ids, so build a clean sequence out of individually decodable ids.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if ' ' not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]Unicode €.[SEP]')
        encoded = tokenizer('e è é ê ë')
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]e è é ê ë[SEP]')
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), '[CLS]e è é ê ë[SEP]')
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets['input_ids'].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token')
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f'<extra_id_{i}>' for i in range(125)]
                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ['an_additional_special_token'],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])
                    ),
                )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens,
                )
                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['a_new_additional_special_token'],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), '�')

    # Perceiver has no fixed vocabulary, so the common vocab test does not apply.
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # a vocab does not exist, so id round-tripping is not meaningful here
    def test_conversion_reversible(self):
        pass

    def test_pretrained_model_lists(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept
        # one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
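# Illustrative note (not part of the test file): Perceiver tokenizes raw UTF-8
# bytes with an offset of 6 (ids 0-5 are reserved for special tokens), so 'A'
# (byte 65) becomes id 71, matching the expected ids above:
#
#   tokenizer = PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')
#   assert tokenizer('A')['input_ids'] == [4, 71, 5]  # [CLS] 'A' [SEP]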
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            'image_mean': self.image_mean,
            'image_std': self.image_std,
            'do_normalize': self.do_normalize,
            'do_resize': self.do_resize,
            'size': self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, 'image_mean'))
        self.assertTrue(hasattr(image_processor, 'image_std'))
        self.assertTrue(hasattr(image_processor, 'do_normalize'))
        self.assertTrue(hasattr(image_processor, 'do_resize'))
        self.assertTrue(hasattr(image_processor, 'size'))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ),
        )
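# Minimal usage sketch (illustrative): the processor resizes any input image to
# the 18x18 target configured throughout these tests.
#
#   from PIL import Image
#   processor = ViTImageProcessor(size={'height': 18, 'width': 18})
#   pixel_values = processor(Image.new('RGB', (32, 32)), return_tensors='pt').pixel_values
#   print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])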
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
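# Quick runner (illustrative): the adjacency structure maps each node to
# [neighbor, cost] pairs, which is the input format `prisms_algorithm` expects.
if __name__ == '__main__':
    test_prim_successful_result()
    print('Prim MST test passed')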
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''Number of processes to use for preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
    )

logger.info('Training/evaluation parameters %s', args)

args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32)
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32)
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
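# Note on the buffer layout assumed by model_infer (descriptive, based on the
# allocations made further below): d_inputs holds the three input bindings in
# engine order (input_ids, attention_mask, token_type_ids), and the two outputs
# are the pagelocked start/end logit buffers copied back after execution.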
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.

column_names = raw_datasets['validation'].column_names

question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the '
        f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='only_second' if pad_on_right else 'only_first',
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='max_length',
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['example_id'] = []

    for i in range(len(tokenized_examples['input_ids'])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples['example_id'].append(examples['id'][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['offset_mapping'][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage='eval'):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
    logger.info('Total Number of Inference = %d', niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f'Evaluation metrics: {eval_metric}')
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Tuple = ["transformers", "torch", "note_seq"]
def __init__( self : List[Any] , *lowercase : List[Any] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] ) | 686 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Dict = '''▁'''
_lowerCamelCase : Optional[Any] = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_lowerCamelCase : List[str] = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
_lowerCamelCase : Any = {
'''facebook/m2m100_418M''': 1_024,
}
# fmt: off
_lowerCamelCase : Any = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = VOCAB_FILES_NAMES
_UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Optional[int] = ["input_ids", "attention_mask"]
_UpperCAmelCase : List[int] = []
_UpperCAmelCase : List[int] = []
def __init__( self : str , lowercase : Dict , lowercase : List[Any] , lowercase : int=None , lowercase : str=None , lowercase : Dict="<s>" , lowercase : List[str]="</s>" , lowercase : Optional[int]="</s>" , lowercase : Any="<pad>" , lowercase : List[str]="<unk>" , lowercase : Any="m2m100" , lowercase : Optional[Dict[str, Any]] = None , lowercase : int=8 , **lowercase : Optional[Any] , ):
'''simple docstring'''
_snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
_snake_case = language_codes
_snake_case = FAIRSEQ_LANGUAGE_CODES[language_codes]
_snake_case = {lang_code: f'''__{lang_code}__''' for lang_code in fairseq_language_code}
_snake_case = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(lowercase )
for lang_code in fairseq_language_code
if self.get_lang_token(lowercase ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowercase , tgt_lang=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , unk_token=lowercase , pad_token=lowercase , language_codes=lowercase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=lowercase , **lowercase , )
_snake_case = vocab_file
_snake_case = load_json(lowercase )
_snake_case = {v: k for k, v in self.encoder.items()}
_snake_case = spm_file
_snake_case = load_spm(lowercase , self.sp_model_kwargs )
_snake_case = len(self.encoder )
_snake_case = {
self.get_lang_token(lowercase ): self.encoder_size + i for i, lang_code in enumerate(lowercase )
}
_snake_case = {lang_code: self.encoder_size + i for i, lang_code in enumerate(lowercase )}
_snake_case = {v: k for k, v in self.lang_token_to_id.items()}
_snake_case = src_lang if src_lang is not None else 'en'
_snake_case = tgt_lang
_snake_case = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_snake_case = num_madeup_words
@property
def A ( self : Optional[int] ):
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def A ( self : Any ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def A ( self : List[Any] , lowercase : str ):
'''simple docstring'''
_snake_case = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A ( self : Optional[Any] , lowercase : str ):
'''simple docstring'''
return self.sp_model.encode(lowercase , out_type=lowercase )
def A ( self : Any , lowercase : Optional[Any] ):
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(lowercase , self.encoder[self.unk_token] )
def A ( self : Dict , lowercase : int ):
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(lowercase , self.unk_token )
def A ( self : Optional[Any] , lowercase : Dict ):
'''simple docstring'''
_snake_case = []
_snake_case = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase ) + token
_snake_case = []
else:
current_sub_tokens.append(lowercase )
out_string += self.sp_model.decode(lowercase )
return out_string.strip()
def A ( self : List[str] , lowercase : List[int] , lowercase : Optional[List[int]] = None , lowercase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
_snake_case = [1] * len(self.prefix_tokens )
_snake_case = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase )) + suffix_ones
return prefix_ones + ([0] * len(lowercase )) + ([0] * len(lowercase )) + suffix_ones
def A ( self : str , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A ( self : str ):
'''simple docstring'''
_snake_case = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ):
'''simple docstring'''
_snake_case = self.__dict__.copy()
_snake_case = None
return state
def __setstate__( self : Optional[Any] , lowercase : Dict ):
'''simple docstring'''
_snake_case = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_snake_case = {}
_snake_case = load_spm(self.spm_file , self.sp_model_kwargs )
def A ( self : str , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
_snake_case = Path(lowercase )
if not save_dir.is_dir():
raise OSError(f'''{save_directory} should be a directory''' )
_snake_case = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
_snake_case = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , lowercase )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , lowercase )
elif not os.path.isfile(self.spm_file ):
with open(lowercase , 'wb' ) as fi:
_snake_case = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (str(lowercase ), str(lowercase ))
def A ( self : int , lowercase : List[str] , lowercase : str = "en" , lowercase : Optional[List[str]] = None , lowercase : str = "ro" , **lowercase : Optional[int] , ):
'''simple docstring'''
_snake_case = src_lang
_snake_case = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase )
def A ( self : Optional[Any] , lowercase : Any , lowercase : Optional[str] , lowercase : Optional[str] , **lowercase : Dict ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_snake_case = src_lang
_snake_case = self(lowercase , add_special_tokens=lowercase , **lowercase )
_snake_case = self.get_lang_id(lowercase )
_snake_case = tgt_lang_id
return inputs
def A ( self : Optional[Any] ):
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang )
def A ( self : Any ):
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang )
def A ( self : int , lowercase : str ):
'''simple docstring'''
_snake_case = self.get_lang_token(lowercase )
_snake_case = self.lang_token_to_id[lang_token]
_snake_case = [self.cur_lang_id]
_snake_case = [self.eos_token_id]
def A ( self : Optional[Any] , lowercase : str ):
'''simple docstring'''
_snake_case = self.get_lang_token(lowercase )
_snake_case = self.lang_token_to_id[lang_token]
_snake_case = [self.cur_lang_id]
_snake_case = [self.eos_token_id]
def A ( self : Optional[int] , lowercase : str ):
'''simple docstring'''
return self.lang_code_to_token[lang]
def A ( self : List[Any] , lowercase : str ):
'''simple docstring'''
_snake_case = self.get_lang_token(lowercase )
return self.lang_token_to_id[lang_token]
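    # Usage sketch (upstream this class is `M2M100Tokenizer`; the checkpoint name is assumed):
    #   tokenizer = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M', src_lang='en', tgt_lang='fr')
    #   model_inputs = tokenizer('Hello world', return_tensors='pt')
    #   # generation then forces the target language via forced_bos_token_id=tokenizer.get_lang_id('fr')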
def load_spm(path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json(path : str ) -> Union[Dict, List]:
    with open(path , 'r' ) as f:
        return json.load(f )
def save_json(data : Union[Dict, List] , path : str ) -> None:
    with open(path , 'w' ) as f:
        json.dump(data , f , indent=2 ) | 686 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a_ ( ) -> Optional[Any]:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(__lowercase ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def a_ ( ) -> Optional[int]:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def a_ ( ) -> Dict:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(__lowercase ):
http_head('https://huggingface.co' ) | 686 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = LEDConfig
_UpperCAmelCase : int = {}
_UpperCAmelCase : List[str] = "gelu"
def __init__( self : Union[str, Any] , lowercase : Optional[int] , lowercase : Dict=13 , lowercase : Dict=7 , lowercase : Tuple=True , lowercase : Dict=False , lowercase : Dict=99 , lowercase : Any=32 , lowercase : List[Any]=2 , lowercase : List[str]=4 , lowercase : List[str]=37 , lowercase : Dict=0.1 , lowercase : int=0.1 , lowercase : List[Any]=20 , lowercase : int=2 , lowercase : Optional[Any]=1 , lowercase : List[str]=0 , lowercase : Optional[int]=4 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = eos_token_id
_snake_case = pad_token_id
_snake_case = bos_token_id
_snake_case = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
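        # e.g. with the defaults above (seq_length=7, attention_window=4 -> 6 after widening),
        # the encoder length is rounded up to the next multiple of the window: 7 -> 12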
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case = prepare_led_inputs_dict(lowercase , lowercase , lowercase )
_snake_case = tf.concat(
[tf.zeros_like(lowercase )[:, :-1], tf.ones_like(lowercase )[:, -1:]] , axis=-1 , )
_snake_case = global_attention_mask
return config, inputs_dict
def A ( self : str , lowercase : str , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = TFLEDModel(config=lowercase ).get_decoder()
_snake_case = inputs_dict['input_ids']
_snake_case = input_ids[:1, :]
_snake_case = inputs_dict['attention_mask'][:1, :]
_snake_case = 1
# first forward pass
_snake_case = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
_snake_case , _snake_case = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case = model(lowercase , attention_mask=lowercase )[0]
_snake_case = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case = output_from_no_past[:, -3:, random_slice_idx]
_snake_case = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : str=None , ) -> Union[str, Any]:
if attention_mask is None:
_snake_case = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_snake_case = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_snake_case = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
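# Sketch of the contract (shapes illustrative): given (batch, seq) `input_ids`, any mask not
# supplied is derived here -- attention masks from the pad token, head masks as all-ones --
# so tests can call prepare_led_inputs_dict(config, input_ids, decoder_input_ids) alone.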
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_UpperCAmelCase : Optional[int] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase : Tuple = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : str = True
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : str = False
_UpperCAmelCase : List[Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = TFLEDModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = tf.zeros_like(inputs_dict['attention_mask'] )
_snake_case = 2
_snake_case = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
_snake_case = True
_snake_case = self.model_tester.seq_length
_snake_case = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase : List[str] ):
_snake_case = outputs.decoder_attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase : List[str] ):
_snake_case = [t.numpy() for t in outputs.encoder_attentions]
_snake_case = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
_snake_case = len(lowercase )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
if self.is_encoder_decoder:
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_decoder_attentions_output(lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase ) )
self.assertEqual(model.config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def A ( self : List[Any] ):
'''simple docstring'''
pass
def A ( self : Any ):
'''simple docstring'''
pass
def _long_tensor(tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
_lowerCamelCase : List[Any] = 1E-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 )
def A ( self : str ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 , rtol=1E-3 ) | 686 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCamelCase : Optional[int] = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_lowerCamelCase : List[str] = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against one or more human-produced reference summaries or translations.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_lowerCamelCase : Dict = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def A ( self : Union[str, Any] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : int=None , lowercase : str=True , lowercase : List[str]=False ):
'''simple docstring'''
if rouge_types is None:
_snake_case = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
_snake_case = rouge_scorer.RougeScorer(rouge_types=lowercase , use_stemmer=lowercase )
if use_aggregator:
_snake_case = scoring.BootstrapAggregator()
else:
_snake_case = []
for ref, pred in zip(lowercase , lowercase ):
_snake_case = scorer.score(lowercase , lowercase )
if use_aggregator:
aggregator.add_scores(lowercase )
else:
scores.append(lowercase )
if use_aggregator:
_snake_case = aggregator.aggregate()
else:
_snake_case = {}
for key in scores[0]:
_snake_case = [score[key] for score in scores]
return result | 686 | 1 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
_lowerCamelCase : int = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
_lowerCamelCase : List[Any] = F'https://www.google.com/search?q={query}&num=100'
_lowerCamelCase : int = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
_lowerCamelCase : int = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
_lowerCamelCase : Any = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link) | 686 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = "swin2sr"
_UpperCAmelCase : Optional[int] = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[int] , lowercase : List[Any]=64 , lowercase : int=1 , lowercase : Union[str, Any]=3 , lowercase : Dict=180 , lowercase : List[Any]=[6, 6, 6, 6, 6, 6] , lowercase : Dict=[6, 6, 6, 6, 6, 6] , lowercase : List[Any]=8 , lowercase : List[str]=2.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : List[str]=False , lowercase : List[Any]=0.02 , lowercase : List[Any]=1E-5 , lowercase : Optional[int]=2 , lowercase : Tuple=1.0 , lowercase : List[Any]="1conv" , lowercase : List[Any]="pixelshuffle" , **lowercase : List[str] , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = embed_dim
_snake_case = depths
_snake_case = len(lowercase )
_snake_case = num_heads
_snake_case = window_size
_snake_case = mlp_ratio
_snake_case = qkv_bias
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = drop_path_rate
_snake_case = hidden_act
_snake_case = use_absolute_embeddings
_snake_case = layer_norm_eps
_snake_case = initializer_range
_snake_case = upscale
_snake_case = img_range
_snake_case = resi_connection
_snake_case = upsampler | 686 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Any = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : Union[str, Any] = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
_lowerCamelCase : List[str] = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
_lowerCamelCase : List[str] = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
_UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : int = BertTokenizer
def __init__( self : Tuple , lowercase : List[Any]=None , lowercase : str=None , lowercase : List[Any]=True , lowercase : str="[UNK]" , lowercase : str="[SEP]" , lowercase : Any="[PAD]" , lowercase : List[str]="[CLS]" , lowercase : Dict="[MASK]" , lowercase : int=True , lowercase : Union[str, Any]=None , **lowercase : int , ):
'''simple docstring'''
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
_snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowercase ) != tokenize_chinese_chars
):
_snake_case = getattr(lowercase , normalizer_state.pop('type' ) )
_snake_case = do_lower_case
_snake_case = strip_accents
_snake_case = tokenize_chinese_chars
_snake_case = normalizer_class(**lowercase )
_snake_case = do_lower_case
def A ( self : Optional[Any] , lowercase : Union[str, Any] , lowercase : Dict=None ):
'''simple docstring'''
_snake_case = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
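        # i.e. the standard BERT layout: [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair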
def A ( self : Tuple , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self : str , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
_snake_case = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase ) | 686 |
import random
def partition(a : list , left_index : int , right_index : int ) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1
def quick_sort_random(a : list , left : int , right : int ) -> None:
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        ) # switches the pivot with the leftmost bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index ) # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right ) # recursive quicksort to the right of the pivot point
def main() -> None:
    user_input = input('Enter numbers separated by a comma:\n' ).strip()
    arr = [int(item ) for item in user_input.split(',' )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )
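# Quick sanity check (illustrative data; the sort is in place):
#   data = [9, 3, 7, 1, 4]
#   quick_sort_random(data, 0, len(data))
#   assert data == [1, 3, 4, 7, 9]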
if __name__ == "__main__":
main() | 686 | 1 |
def palindromic_string(input_string : str ) -> str:
    """Manacher's algorithm: return the longest palindromic substring in O(n)."""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ''
    output_string = ''
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # store the start and end of the furthest-right palindromic substring found so far
    l, r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string ) )]
    # for each character in new_input_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string ) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1 # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
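# Example: the longest palindromic substring of "abbbaba" is "abbba":
#   assert palindromic_string("abbbaba") == "abbba"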
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 |
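# Project Euler 58 (spiral primes): grow a square number spiral one layer at a time and
# return the side length at which the fraction of primes along both diagonals first drops
# below `ratio`.  A layer of side j + 2 adds three non-square corners, j*j + (j + 1),
# j*j + 2*(j + 1) and j*j + 3*(j + 1), plus the square (j + 2)**2, which is never prime.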
import math
def is_prime(number : int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio : float = 0.1 ) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 1 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_lowerCamelCase : int = re.compile(r'''\s+''')
def a_ ( __lowercase : List[Any] ) -> Dict:
    return {"hash": hashlib.md5(re.sub(__lowercase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def a_ ( __lowercase : List[Any] ) -> Dict:
_snake_case = [len(__lowercase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(__lowercase ), "line_max": max(__lowercase )}
def a_ ( __lowercase : Optional[int] ) -> List[str]:
_snake_case = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] ) -> Optional[int]:
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def a_ ( __lowercase : Union[str, Any] , __lowercase : int=5 ) -> Optional[Any]:
_snake_case = ['auto-generated', 'autogenerated', 'automatically generated']
_snake_case = example['content'].splitlines()
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a_ ( __lowercase : List[Any] , __lowercase : int=5 , __lowercase : Tuple=0.0_5 ) -> Union[str, Any]:
_snake_case = ['unit tests', 'test file', 'configuration file']
_snake_case = example['content'].splitlines()
_snake_case = 0
_snake_case = 0
# first test
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_snake_case = example['content'].count('\n' )
_snake_case = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a_ ( __lowercase : Union[str, Any] ) -> Any:
_snake_case = ['def ', 'class ', 'for ', 'while ']
_snake_case = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a_ ( __lowercase : Tuple , __lowercase : Any=4 ) -> List[str]:
_snake_case = example['content'].splitlines()
_snake_case = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a_ ( __lowercase : Dict ) -> Dict:
_snake_case = tokenizer(example['content'] , truncation=__lowercase )['input_ids']
_snake_case = len(example['content'] ) / len(__lowercase )
return {"ratio": ratio}
def a_ ( __lowercase : Optional[Any] ) -> Any:
_snake_case = {}
results.update(get_hash(__lowercase ) )
results.update(line_stats(__lowercase ) )
results.update(alpha_stats(__lowercase ) )
results.update(char_token_ratio(__lowercase ) )
results.update(is_autogenerated(__lowercase ) )
results.update(is_config_or_test(__lowercase ) )
results.update(has_no_keywords(__lowercase ) )
results.update(has_few_assignments(__lowercase ) )
return results
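# Illustrative shape of a `preprocess` result for a tiny snippet (values made up):
#   {'hash': '...', 'line_mean': 8.0, 'line_max': 12, 'alpha_frac': 0.62, 'ratio': 3.1,
#    'autogenerated': False, 'config_or_test': False, 'has_no_keywords': False,
#    'has_few_assignments': True}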
def a_ ( __lowercase : Optional[int] , __lowercase : str , __lowercase : List[Any] ) -> int:
if not check_uniques(__lowercase , __lowercase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a_ ( __lowercase : Dict ) -> Dict:
with open(__lowercase , 'rb' ) as f_in:
with gzip.open(str(__lowercase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(__lowercase , __lowercase )
os.unlink(__lowercase )
# Settings
_lowerCamelCase : Dict = HfArgumentParser(PreprocessingArguments)
_lowerCamelCase : Dict = parser.parse_args()
if args.num_workers is None:
_lowerCamelCase : int = multiprocessing.cpu_count()
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_lowerCamelCase : Any = time.time()
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
_lowerCamelCase : Optional[int] = time.time()
_lowerCamelCase : Union[str, Any] = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
_lowerCamelCase : List[Any] = set(ds.unique('''hash'''))
_lowerCamelCase : Dict = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
_lowerCamelCase : List[Any] = time.time()
_lowerCamelCase : Optional[int] = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_lowerCamelCase : Union[str, Any] = time.time()
_lowerCamelCase , _lowerCamelCase : Dict = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
print(F'Size of deduplicate dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
_lowerCamelCase : Optional[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
_lowerCamelCase : int = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
_lowerCamelCase : Union[str, Any] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_lowerCamelCase : Dict = str(data_dir / F'file-{file_number+1:012}.json')
_lowerCamelCase : str = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}') | 686 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = "resnet"
_UpperCAmelCase : Any = ["basic", "bottleneck"]
def __init__( self : Union[str, Any] , lowercase : Dict=3 , lowercase : Any=64 , lowercase : Any=[256, 512, 1_024, 2_048] , lowercase : Dict=[3, 4, 6, 3] , lowercase : Any="bottleneck" , lowercase : Optional[Any]="relu" , lowercase : Dict=False , lowercase : str=None , lowercase : Tuple=None , **lowercase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
_snake_case = num_channels
_snake_case = embedding_size
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = layer_type
_snake_case = hidden_act
_snake_case = downsample_in_first_stage
_snake_case = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase ) + 1 )]
_snake_case , _snake_case = get_aligned_output_features_output_indices(
out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names )
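        # e.g. the defaults above (depths [3, 4, 6, 3] with bottleneck layers) describe a
        # ResNet-50-style backbone; `out_features`/`out_indices` select which of
        # ['stem', 'stage1', ..., 'stage4'] are exposed to a downstream head.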
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
@property
def A ( self : int ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return 1E-3 | 686 | 1 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
_lowerCamelCase : Optional[Any] = logging.getLogger(__name__)
_lowerCamelCase : Union[str, Any] = tf.data.AUTOTUNE
def a_ ( ) -> List[Any]:
_snake_case = argparse.ArgumentParser(description='Train a masked language model on TPU.' )
parser.add_argument(
'--pretrained_model_config' , type=__lowercase , default='roberta-base' , help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!' , )
parser.add_argument(
'--tokenizer' , type=__lowercase , default='unigram-tokenizer-wikitext' , help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.' , )
parser.add_argument(
'--per_replica_batch_size' , type=__lowercase , default=8 , help='Batch size per TPU core.' , )
parser.add_argument(
'--no_tpu' , action='store_true' , help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.' , )
parser.add_argument(
'--tpu_name' , type=__lowercase , help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.' , default='local' , )
parser.add_argument(
'--tpu_zone' , type=__lowercase , help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.' , )
parser.add_argument(
'--gcp_project' , type=__lowercase , help='Google cloud project name. Only used for non-Colab TPU nodes.' )
parser.add_argument(
'--bfloat16' , action='store_true' , help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.' , )
parser.add_argument(
'--train_dataset' , type=__lowercase , help='Path to training dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
parser.add_argument(
'--shuffle_buffer_size' , type=__lowercase , default=2**18 , help='Size of the shuffle buffer (in samples)' , )
parser.add_argument(
'--eval_dataset' , type=__lowercase , help='Path to evaluation dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
parser.add_argument(
'--num_epochs' , type=__lowercase , default=1 , help='Number of epochs to train for.' , )
parser.add_argument(
'--learning_rate' , type=__lowercase , default=1E-4 , help='Learning rate to use for training.' , )
parser.add_argument(
'--weight_decay_rate' , type=__lowercase , default=1E-3 , help='Weight decay rate to use for training.' , )
parser.add_argument(
'--max_length' , type=__lowercase , default=512 , help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py' , )
parser.add_argument(
'--mlm_probability' , type=__lowercase , default=0.1_5 , help='Fraction of tokens to mask during training.' , )
parser.add_argument('--output_dir' , type=__lowercase , required=__lowercase , help='Path to save model checkpoints to.' )
parser.add_argument('--hub_model_id' , type=__lowercase , help='Model ID to upload to on the Hugging Face Hub.' )
_snake_case = parser.parse_args()
return args
def a_ ( __lowercase : Dict ) -> List[str]:
try:
if args.tpu_name:
_snake_case = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
_snake_case = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '
'--gcp_project. When running on a TPU VM, use --tpu_name local.' )
tf.config.experimental_connect_to_cluster(__lowercase )
tf.tpu.experimental.initialize_tpu_system(__lowercase )
return tpu
def count_samples(file_list ) -> int:
    num_samples = 0
    for file in file_list:
        filename = file.split('/' )[-1]
        sample_count = re.search(r'-\d+-(\d+)\.tfrecord' , filename ).group(1 )
        num_samples += int(sample_count )
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=tf.data.AUTOTUNE)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=tf.data.AUTOTUNE)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=tf.data.AUTOTUNE)
    dataset = dataset.prefetch(tf.data.AUTOTUNE)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device='/gpu:0')
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, '*.tfrecord'))
    if not training_records:
        raise ValueError(f'No .tfrecord files found in {args.train_dataset}.')
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, '*.tfrecord'))
    if not eval_records:
        raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.')
    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=['accuracy'])

        def decode_fn(example):
            features = {
                'input_ids': tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
                'attention_mask': tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            }
            return tf.io.parse_single_example(example, features)

        # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
        # use their methods in our data pipeline.
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors='tf')

        def mask_with_collator(batch):
            # TF really needs an isin() function
            special_tokens_mask = (
                ~tf.cast(batch['attention_mask'], tf.bool)
                | (batch['input_ids'] == tokenizer.cls_token_id)
                | (batch['input_ids'] == tokenizer.sep_token_id)
            )
            batch['input_ids'], batch['labels'] = data_collator.tf_mask_tokens(
                batch['input_ids'], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
            return batch

        batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
        train_dataset = prepare_dataset(
            training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )
        eval_dataset = prepare_dataset(
            eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )
        callbacks = []
        if args.hub_model_id:
            callbacks.append(
                PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))
        model.fit(
            train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )
        model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args) | 686 |
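The shard filenames consumed by `count_samples` are expected to end in `-<shard_index>-<num_samples>.tfrecord`; a minimal sketch of the counting logic in isolation (the paths below are hypothetical, the regex is the one used above):

import re

files = ['gs://my-bucket/train-00000-8192.tfrecord', 'gs://my-bucket/train-00001-8192.tfrecord']  # hypothetical paths
total = 0
for f in files:
    # the second number in each filename is that shard's sample count
    total += int(re.search(r'-\d+-(\d+)\.tfrecord', f.split('/')[-1]).group(1))
print(total)  # 16384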
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = 'bf16' if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder='unet', dtype=dtype, revision=revision)
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4', fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {'params': params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id='stabilityai/stable-diffusion-2', fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1_024), fp16=True)
        sample = model.apply(
            {'params': params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2) | 686 | 1 |
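For reference, `get_file_format` above maps a seed and tensor shape to the numpy fixture name fetched from the Hub; a one-line sketch with arbitrary values:

# self.get_file_format(17, (4, 77, 768)) -> 'gaussian_noise_s=17_shape=4_77_768.npy'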
from __future__ import annotations
class Matrix:
'''simple docstring'''
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.')
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self):
'''simple docstring'''
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def A ( self : Tuple ):
'''simple docstring'''
return len(self.rows )
@property
    def num_columns(self):
'''simple docstring'''
return len(self.rows[0] )
@property
    def order(self):
'''simple docstring'''
return (self.num_rows, self.num_columns)
@property
    def is_square(self):
'''simple docstring'''
return self.order[0] == self.order[1]
    def identity(self):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self):
'''simple docstring'''
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable(self):
        return bool(self.determinant())
    def get_minor(self, row: int, column: int):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()
    def get_cofactor(self, row: int, column: int):
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)
    def minors(self):
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ])
    def cofactors(self):
'''simple docstring'''
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate(self):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)
    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse')
        return self.adjugate() * (1 / determinant)
def __repr__( self : List[str] ):
'''simple docstring'''
return str(self.rows )
def __str__( self : str ):
'''simple docstring'''
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
                    '[' + '. '.join([str(value) for value in row]) + '.]'
for row in self.rows
] )
+ "]"
)
    def add_row(self, row: list[int], position: int | None = None):
        type_error = TypeError('Row must be a list containing all ints and/or floats')
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix')
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column(self, column: list[int], position: int | None = None):
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats')
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix')
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other: object):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object):
        return not self == other
def __neg__( self : List[Any] ):
'''simple docstring'''
return self * -1
    def __add__(self, other: Matrix):
'''simple docstring'''
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__(self, other: Matrix):
'''simple docstring'''
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__(self, other: Matrix | int | float):
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second')
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ])
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix')
    def __pow__(self, other: int):
        if not isinstance(other, int):
            raise TypeError('A Matrix can only be raised to the power of an int')
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power')
        result = self
        for _ in range(other - 1):
            result *= self
        return result
    @classmethod
    def dot_product(cls, row: list[int], column: list[int]):
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 |
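A short usage sketch of the Matrix class above; the values in the comments follow directly from the definitions:

m = Matrix([[1, 2], [3, 4]])
n = Matrix([[5, 6], [7, 8]])
print(m.determinant())   # 1*4 - 2*3 = -2
print((m + n).rows)      # [[6, 8], [10, 12]]
print((m * n).rows)      # [[19, 22], [43, 50]]
print((m ** 2).rows)     # [[7, 10], [15, 22]]
m.add_row([5, 6])
print(m.num_rows)        # 3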
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('moe_layer.experts.0', f'ffn.experts.expert_{expert_idx}')
            else:
                key = key.replace('moe_layer.experts.', 'ffn.experts.expert_')
        if "gate" in key:
            key = key.replace('.moe_layer.gate.wg', '.ffn.router.classifier')
        if "fc2" in key and "experts" not in key:
            key = key.replace('.fc2.', '.ffn.fc2.')
        if "fc1" in key and "experts" not in key:
            key = key.replace('.fc1.', '.ffn.fc1.')
        if ".encoder_attn." in key:
            key = key.replace('.encoder_attn.', '.cross_attention.')
        if "encoder_attn_layer_norm" in key:
            key = key.replace('encoder_attn_layer_norm', 'cross_attention_layer_norm')
        if "final_layer_norm" in key:
            key = key.replace('final_layer_norm', 'ff_layer_norm')
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f'-rank-{expert}.pt'
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)['model']
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace('.bin', f'-{len(sharded_state_dicts)+1:05d}-of-???.bin'))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('.bin', f'-{len(sharded_state_dicts)+1:05d}-of-???.bin'))
    shared_weights = torch.load(switch_checkpoint_path + '-shared.pt')['model']
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights['shared.weight'] = shared_weights['decoder.embed_tokens.weight']
    sharded_state_dicts.append(shared_weights.keys())
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace('.bin', f'-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin')
        temp_filename = os.path.join(dump_path, weights_name.replace('.bin', f'-{idx+1:05d}-of-???.bin'))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), 'w', encoding='utf-8') as f:
        content = json.dumps(index, indent=2, sort_keys=True) + '\n'
        f.write(content)
    return metadata, index
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_lowerCamelCase : List[str] = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path) | 686 | 1 |
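A worked example of the key mapping performed by `rename_fairseq_keys` above (the key is illustrative; note the `fc1` rewrite is skipped because the key contains "experts"):

import torch
from_key = 'decoder.layers.3.moe_layer.experts.0.fc1.weight'
renamed = rename_fairseq_keys({from_key: torch.zeros(1)}, expert_idx=7)
print(list(renamed))  # ['decoder.layers.3.ffn.experts.expert_7.fc1.weight']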
import torch
from diffusers import StableDiffusionPipeline
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('cuda')
prompt = 'A photo of sks dog in a bucket'
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('dog-bucket.png') | 686 |
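For reproducible samples, the pipeline call above also accepts a `generator`; a minimal sketch (seed value arbitrary):

import torch
generator = torch.Generator('cuda').manual_seed(0)
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]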
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_lowerCamelCase : List[Any] = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_lowerCamelCase : Any = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_lowerCamelCase : Union[str, Any] = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]') | 686 | 1 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 |
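The function above computes f = 1 / (2 * pi * sqrt(L * C)); a quick numeric check with L = 10 H and C = 5 F:

print(resonant_frequency(inductance=10, capacitance=5))
# ('Resonant frequency', 0.022507907903927652)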
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_lowerCamelCase : Dict = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
'''simple docstring'''
_UpperCAmelCase : int = "sequence-classification"
    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)
    def training_step(self, batch, batch_idx):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]['scheduler']
        tensorboard_logs = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s', cached_features_file)
            else:
                logger.info('Creating features from dataset file at %s', args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == 'dev'
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, )
                logger.info('Saving features into cached file %s', cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False):
        mode = 'dev' if mode == 'test' else mode
        cached_features_file = self._feature_file(mode)
        logger.info('Loading features from cached file %s', cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=shuffle, )
    def validation_step(self, batch, batch_idx):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['labels'].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x['pred'] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x['target'] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret['log'] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret['log']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret['log']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            '--max_seq_length', default=128, type=int, help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ), )
        parser.add_argument(
            '--task', default='', type=str, required=True, help='The GLUE task to run', )
        parser.add_argument(
            '--gpus', default=0, type=int, help='The number of GPUs allocated for this, it is by default 0 meaning none', )
        parser.add_argument(
            '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            './results', f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}", )
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model, args)
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main() | 686 | 1 |
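A minimal invocation sketch for the script above; `--task`, `--max_seq_length`, and `--gpus` are defined in `add_model_specific_args`, while `--data_dir`, `--output_dir`, and `--do_predict` are read by the code but registered by `add_generic_args`/`BaseTransformer` (not shown here), so the exact flag set is an assumption, as is the script filename:

# python glue_transformer.py --task mrpc --data_dir ./glue_data/MRPC \
#     --max_seq_length 128 --gpus 1 --output_dir ./results/mrpc --do_predict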
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64, ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
'''simple docstring'''
_snake_case = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_snake_case = self.num_queries
_snake_case = self.num_labels
_snake_case = [1, 1, 1, 1]
_snake_case = self.num_channels
_snake_case = 64
_snake_case = 128
_snake_case = self.hidden_dim
_snake_case = self.hidden_dim
_snake_case = self.hidden_dim
return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict
def A ( self : Tuple , lowercase : str , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = output.encoder_hidden_states
_snake_case = output.pixel_decoder_hidden_states
_snake_case = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowercase ) , config.decoder_layers )
def A ( self : Tuple , lowercase : List[Any] , lowercase : Any , lowercase : List[str] , lowercase : Dict=False ):
'''simple docstring'''
with torch.no_grad():
_snake_case = MaskaFormerModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(pixel_values=lowercase , pixel_mask=lowercase )
_snake_case = model(lowercase , output_hidden_states=lowercase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowercase , lowercase )
def A ( self : Union[str, Any] , lowercase : List[str] , lowercase : Tuple , lowercase : Tuple , lowercase : Union[str, Any] , lowercase : Tuple ):
'''simple docstring'''
_snake_case = MaskaFormerForUniversalSegmentation(config=lowercase )
model.to(lowercase )
model.eval()
def comm_check_on_output(lowercase : Union[str, Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_snake_case = model(pixel_values=lowercase , pixel_mask=lowercase )
_snake_case = model(lowercase )
comm_check_on_output(lowercase )
_snake_case = model(
pixel_values=lowercase , pixel_mask=lowercase , mask_labels=lowercase , class_labels=lowercase )
comm_check_on_output(lowercase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
_UpperCAmelCase : str = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : int = False
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = MaskaFormerModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def A ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowercase , **lowercase , output_hidden_states=lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowercase )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def A ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def A ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def A ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def A ( self : Optional[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def A ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
@slow
def A ( self : str ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_snake_case = MaskaFormerModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = (self.model_tester.min_size,) * 2
_snake_case = {
'pixel_values': torch.randn((2, 3, *size) , device=lowercase ),
'mask_labels': torch.randn((2, 10, *size) , device=lowercase ),
'class_labels': torch.zeros(2 , 10 , device=lowercase ).long(),
}
_snake_case = self.model_tester.get_config()
_snake_case = MaskaFormerForUniversalSegmentation(lowercase ).to(lowercase )
_snake_case = model(**lowercase )
self.assertTrue(outputs.loss is not None )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowercase , **lowercase , output_hidden_states=lowercase )
def A ( self : Any ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase ).to(lowercase )
_snake_case = model(**lowercase , output_attentions=lowercase )
self.assertTrue(outputs.attentions is not None )
def A ( self : Tuple ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_snake_case = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = model_class(lowercase )
model.to(lowercase )
model.train()
_snake_case = model(lowercase , mask_labels=lowercase , class_labels=lowercase ).loss
loss.backward()
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = True
_snake_case = True
_snake_case = model_class(lowercase ).to(lowercase )
model.train()
_snake_case = model(lowercase , mask_labels=lowercase , class_labels=lowercase )
_snake_case = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_snake_case = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_snake_case = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_snake_case = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowercase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowerCamelCase : Tuple = 1E-4
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def A ( self : str ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def A ( self : List[str] ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def A ( self : Dict ):
'''simple docstring'''
_snake_case = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowercase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(lowercase , return_tensors='pt' ).to(lowercase )
_snake_case = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase , (1, 3, 384, 384) )
with torch.no_grad():
_snake_case = model(**lowercase )
_snake_case = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(lowercase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowercase , atol=lowercase ) )
_snake_case = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(lowercase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowercase , atol=lowercase ) )
_snake_case = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(lowercase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowercase , atol=lowercase ) )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowercase ).eval()
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(lowercase , return_tensors='pt' ).to(lowercase )
_snake_case = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase , (1, 3, 384, 384) )
with torch.no_grad():
_snake_case = model(**lowercase )
# masks_queries_logits
_snake_case = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_snake_case = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
_snake_case = torch.tensor(lowercase ).to(lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase , atol=lowercase ) )
# class_queries_logits
_snake_case = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_snake_case = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase , atol=lowercase ) )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowercase ).eval()
_snake_case = self.default_image_processor
_snake_case = image_processor(
            [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='pt' , )
_snake_case = inputs['pixel_values'].to(lowercase )
_snake_case = [el.to(lowercase ) for el in inputs['mask_labels']]
_snake_case = [el.to(lowercase ) for el in inputs['class_labels']]
with torch.no_grad():
_snake_case = model(**lowercase )
self.assertTrue(outputs.loss is not None ) | 686 |
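A minimal inference sketch against the checkpoint exercised by the integration tests above, using only the classes and methods the tests themselves call (post-processing of the logits is omitted):

model = MaskaFormerForUniversalSegmentation.from_pretrained('facebook/mask2former-swin-small-coco-instance').eval()
image_processor = MaskaFormerImageProcessor.from_pretrained('facebook/mask2former-swin-small-coco-instance')
inputs = image_processor(prepare_img(), return_tensors='pt')
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.masks_queries_logits.shape)  # (batch, num_queries, height // 4, width // 4)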
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
'''simple docstring'''
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1, )
        inputs_dict['global_attention_mask'] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = TFLEDModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = tf.zeros_like(inputs_dict['attention_mask'] )
_snake_case = 2
_snake_case = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
_snake_case = True
_snake_case = self.model_tester.seq_length
_snake_case = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase : List[str] ):
_snake_case = outputs.decoder_attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase : List[str] ):
_snake_case = [t.numpy() for t in outputs.encoder_attentions]
_snake_case = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
_snake_case = len(lowercase )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
if self.is_encoder_decoder:
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_decoder_attentions_output(lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase ) )
self.assertEqual(model.config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def A ( self : List[Any] ):
'''simple docstring'''
pass
def A ( self : Any ):
'''simple docstring'''
pass
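# Tiny helper (referenced below as `_long_tensor`): wraps a nested Python list in a
# constant integer tensor for the integration tests.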
def a_ ( __lowercase : str ) -> Optional[Any]:
return tf.constant(__lowercase , dtype=tf.intaa )
_lowerCamelCase : List[Any] = 1E-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 )
def A ( self : str ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 , rtol=1E-3 ) | 686 | 1 |
import torch
from diffusers import DiffusionPipeline
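# Minimal custom DiffusionPipeline: runs a single denoising step on random noise; the last
# line cancels the scheduler output against itself, so the pipeline effectively returns a
# tensor of ones, a deterministic fixture-style result.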
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowercase : Union[str, Any] , lowercase : List[Any] ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowercase , scheduler=lowercase )
def __call__( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
_snake_case = 1
_snake_case = self.unet(lowercase , lowercase ).sample
_snake_case = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample
_snake_case = scheduler_output - scheduler_output + torch.ones_like(lowercase )
return result | 686 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
_lowerCamelCase : Union[str, Any] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_lowerCamelCase : Union[str, Any] = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
_lowerCamelCase : Optional[int] = '''zero2'''
_lowerCamelCase : List[Any] = '''zero3'''
_lowerCamelCase : Dict = [ZEROa, ZEROa]
def a_ ( __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Tuple ) -> Dict:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
    _snake_case = parameterized.to_safe_name('_'.join(str(x ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
_lowerCamelCase : Dict = list(itertools.product(stages, models.keys()))
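# With the two stages and two model keys above this yields four (stage, model) pairs,
# e.g. ("zero2", "base") through ("zero3", "robust"), so every test method below is
# instantiated once per combination via @parameterized.expand.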
@slow
@require_deepspeed
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : List[str] , lowercase : List[Any] , lowercase : Dict ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : Any , lowercase : str , lowercase : List[str] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : List[str] , lowercase : Optional[Any] , lowercase : Optional[int] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
def A ( self : List[str] , lowercase : Optional[Any] ):
'''simple docstring'''
pass
def A ( self : str , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : bool = True , lowercase : bool = True , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = models[model]
_snake_case = self.run_trainer(
stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
self.do_checks(lowercase )
return output_dir
def A ( self : Any , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : int = 1 , lowercase : bool = True , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
_snake_case = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
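        # Illustrative sketch of what such a stage config might contain (the real
        # ds_config_wav2vec2_zero*.json files live next to the test):
        #   {"zero_optimization": {"stage": 2, "find_unused_parameters": true},
        #    "fp16": {"enabled": "auto"}}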
_snake_case = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_snake_case = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_snake_case = self.get_launcher(lowercase )
_snake_case = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase , env=self.get_env() )
return output_dir
def A ( self : List[str] , lowercase : Any=False ):
'''simple docstring'''
_snake_case = min(2 , get_gpu_count() ) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split() | 686 | 1 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowerCamelCase : Tuple = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
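# Randomly crop `wav` to at most `max_length` seconds at the given sample rate; clips
# already short enough are returned unchanged.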
def a_ ( __lowercase : np.ndarray , __lowercase : float , __lowercase : int = 16_000 ) -> Union[str, Any]:
_snake_case = int(round(sample_rate * max_length ) )
if len(__lowercase ) <= sample_length:
return wav
_snake_case = randint(0 , len(__lowercase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
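# The two dataclasses below define the script's CLI: dataset/audio options first, then
# model/checkpoint options; HfArgumentParser later turns their fields into argparse flags.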
@dataclass
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : Optional[str] = field(default=UpperCAmelCase ,metadata={"help": "Name of a dataset from the datasets package"} )
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "A file containing the training audio paths and labels."} )
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "A file containing the validation audio paths and labels."} )
_UpperCAmelCase : str = field(
default="train" ,metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} ,)
_UpperCAmelCase : str = field(
default="validation" ,metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} ,)
_UpperCAmelCase : str = field(
default="audio" ,metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} ,)
_UpperCAmelCase : str = field(
default="label" ,metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
_UpperCAmelCase : Optional[int] = field(
default=UpperCAmelCase ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} ,)
_UpperCAmelCase : Optional[int] = field(
default=UpperCAmelCase ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} ,)
_UpperCAmelCase : float = field(
default=2_0 ,metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} ,)
@dataclass
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : str = field(
default="facebook/wav2vec2-base" ,metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ,)
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
_UpperCAmelCase : str = field(
default="main" ,metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} ,)
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "Name or path of preprocessor config."} )
_UpperCAmelCase : bool = field(
default=UpperCAmelCase ,metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
_UpperCAmelCase : bool = field(
default=UpperCAmelCase ,metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
_UpperCAmelCase : bool = field(
default=UpperCAmelCase ,metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} ,)
_UpperCAmelCase : Optional[bool] = field(
default=UpperCAmelCase ,metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
_UpperCAmelCase : bool = field(
default=UpperCAmelCase ,metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} ,)
def A ( self : List[str] ):
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'The argument `--freeze_feature_extractor` is deprecated and '
                'will be removed in a future version. Use `--freeze_feature_encoder` '
'instead. Setting `freeze_feature_encoder==True`.' , lowercase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
                'should not be used in combination with `--freeze_feature_encoder`. '
'Only make use of `--freeze_feature_encoder`.' )
def a_ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_snake_case , _snake_case , _snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_snake_case , _snake_case , _snake_case = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' , __lowercase , __lowercase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_snake_case = training_args.get_process_log_level()
logger.setLevel(__lowercase )
transformers.utils.logging.set_verbosity(__lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_snake_case = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_snake_case = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
_snake_case = DatasetDict()
_snake_case = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'Make sure to set `--audio_column_name` to the correct audio column - one of '
f'''{', '.join(raw_datasets['train'].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'Make sure to set `--label_column_name` to the correct text column - one of '
f'''{', '.join(raw_datasets['train'].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_snake_case = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_snake_case = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_snake_case = feature_extractor.model_input_names[0]
def train_transforms(__lowercase : Any ):
_snake_case = []
for audio in batch[data_args.audio_column_name]:
_snake_case = random_subsample(
audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(__lowercase )
_snake_case = feature_extractor(__lowercase , sampling_rate=feature_extractor.sampling_rate )
_snake_case = {model_input_name: inputs.get(__lowercase )}
_snake_case = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(__lowercase : Tuple ):
_snake_case = [audio['array'] for audio in batch[data_args.audio_column_name]]
_snake_case = feature_extractor(__lowercase , sampling_rate=feature_extractor.sampling_rate )
_snake_case = {model_input_name: inputs.get(__lowercase )}
_snake_case = list(batch[data_args.label_column_name] )
return output_batch
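    # Note the asymmetry: training randomly subsamples each clip as augmentation, while
    # evaluation always feeds the full waveform so metrics stay deterministic.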
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_snake_case = raw_datasets['train'].features[data_args.label_column_name].names
_snake_case , _snake_case = {}, {}
for i, label in enumerate(__lowercase ):
_snake_case = str(__lowercase )
_snake_case = label
# Load the accuracy metric from the datasets package
_snake_case = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(__lowercase : Optional[int] ):
_snake_case = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=__lowercase , references=eval_pred.label_ids )
_snake_case = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowercase ) , labelaid=__lowercase , idalabel=__lowercase , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_snake_case = (
raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__lowercase , output_all_columns=__lowercase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_snake_case = (
raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__lowercase , output_all_columns=__lowercase )
# Initialize our trainer
_snake_case = Trainer(
model=__lowercase , args=__lowercase , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=__lowercase , tokenizer=__lowercase , )
# Training
if training_args.do_train:
_snake_case = None
if training_args.resume_from_checkpoint is not None:
_snake_case = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_snake_case = last_checkpoint
_snake_case = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_snake_case = trainer.evaluate()
trainer.log_metrics('eval' , __lowercase )
trainer.save_metrics('eval' , __lowercase )
# Write model card and (optionally) push to hub
_snake_case = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowercase )
else:
trainer.create_model_card(**__lowercase )
if __name__ == "__main__":
main() | 686 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : int = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
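# Standard lazy-import layout: the structure above is only materialized on attribute
# access via _LazyModule, so a plain `import` stays cheap when torch isn't installed.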
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 1 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def a_ ( __lowercase : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : Any ) -> str:
_snake_case = os.path.abspath(__lowercase )
logger.info(f'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
_snake_case = tf.train.list_variables(__lowercase )
_snake_case = []
_snake_case = []
_snake_case = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
_snake_case = full_name.split('/' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(f'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
_snake_case = name[1:]
# figure out how many levels deep the name is
_snake_case = 0
for _name in name:
if _name.startswith('layer_with_weights' ):
depth += 1
else:
break
layer_depth.append(__lowercase )
# read data
_snake_case = tf.train.load_variable(__lowercase , __lowercase )
names.append('/'.join(__lowercase ) )
arrays.append(__lowercase )
logger.info(f'''Read a total of {len(__lowercase ):,} layers''' )
# Sanity check
if len(set(__lowercase ) ) != 1:
raise ValueError(f'''Found layer names with different depths (layer depth {list(set(__lowercase ) )})''' )
_snake_case = list(set(__lowercase ) )[0]
if layer_depth != 1:
raise ValueError(
'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'
' heads.' )
# convert layers
logger.info('Converting weights...' )
for full_name, array in zip(__lowercase , __lowercase ):
_snake_case = full_name.split('/' )
_snake_case = model
_snake_case = []
for i, m_name in enumerate(__lowercase ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('layer_with_weights' ):
_snake_case = int(m_name.split('-' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['embeddings', 'LayerNorm'] )
_snake_case = getattr(__lowercase , 'embeddings' )
_snake_case = getattr(__lowercase , 'LayerNorm' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['encoder', 'layer', str(layer_num - 4 )] )
_snake_case = getattr(__lowercase , 'encoder' )
_snake_case = getattr(__lowercase , 'layer' )
_snake_case = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['pooler', 'dense'] )
_snake_case = getattr(__lowercase , 'pooler' )
_snake_case = getattr(__lowercase , 'dense' )
elif m_name == "embeddings":
trace.append('embeddings' )
_snake_case = getattr(__lowercase , 'embeddings' )
if layer_num == 0:
trace.append('word_embeddings' )
_snake_case = getattr(__lowercase , 'word_embeddings' )
elif layer_num == 1:
trace.append('position_embeddings' )
_snake_case = getattr(__lowercase , 'position_embeddings' )
elif layer_num == 2:
trace.append('token_type_embeddings' )
_snake_case = getattr(__lowercase , 'token_type_embeddings' )
else:
raise ValueError(f'''Unknown embedding layer with name {full_name}''' )
trace.append('weight' )
_snake_case = getattr(__lowercase , 'weight' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['attention', 'self'] )
_snake_case = getattr(__lowercase , 'attention' )
_snake_case = getattr(__lowercase , 'self' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['attention', 'output', 'LayerNorm'] )
_snake_case = getattr(__lowercase , 'attention' )
_snake_case = getattr(__lowercase , 'output' )
_snake_case = getattr(__lowercase , 'LayerNorm' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['attention', 'output', 'dense'] )
_snake_case = getattr(__lowercase , 'attention' )
_snake_case = getattr(__lowercase , 'output' )
_snake_case = getattr(__lowercase , 'dense' )
elif m_name == "_output_dense":
# output dense
trace.extend(['output', 'dense'] )
_snake_case = getattr(__lowercase , 'output' )
_snake_case = getattr(__lowercase , 'dense' )
elif m_name == "_output_layer_norm":
            # output layer norm
trace.extend(['output', 'LayerNorm'] )
_snake_case = getattr(__lowercase , 'output' )
_snake_case = getattr(__lowercase , 'LayerNorm' )
elif m_name == "_key_dense":
# attention key
trace.append('key' )
_snake_case = getattr(__lowercase , 'key' )
elif m_name == "_query_dense":
# attention query
trace.append('query' )
_snake_case = getattr(__lowercase , 'query' )
elif m_name == "_value_dense":
# attention value
trace.append('value' )
_snake_case = getattr(__lowercase , 'value' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['intermediate', 'dense'] )
_snake_case = getattr(__lowercase , 'intermediate' )
_snake_case = getattr(__lowercase , 'dense' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('bias' )
_snake_case = getattr(__lowercase , 'bias' )
elif m_name in ["kernel", "gamma"]:
trace.append('weight' )
_snake_case = getattr(__lowercase , 'weight' )
else:
logger.warning(f'''Ignored {m_name}''' )
# for certain layers reshape is necessary
_snake_case = '.'.join(__lowercase )
if re.match(r'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)' , __lowercase ) or re.match(
r'(\S+)\.attention\.output\.dense\.weight' , __lowercase ):
_snake_case = array.reshape(pointer.data.shape )
if "kernel" in full_name:
_snake_case = array.transpose()
if pointer.shape == array.shape:
_snake_case = torch.from_numpy(__lowercase )
else:
raise ValueError(
f'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
f''' {array.shape}''' )
logger.info(f'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
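# Driver (invoked below as `convert_tfa_checkpoint_to_pytorch`): build a BertModel from the
# JSON config, populate it from the TF2 checkpoint, then serialize the state dict.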
def a_ ( __lowercase : str , __lowercase : Any , __lowercase : Optional[int] ) -> Optional[Any]:
# Instantiate model
logger.info(f'''Loading model based on config from {config_path}...''' )
_snake_case = BertConfig.from_json_file(__lowercase )
_snake_case = BertModel(__lowercase )
# Load weights from checkpoint
logger.info(f'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(__lowercase , __lowercase , __lowercase )
# Save pytorch-model
logger.info(f'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict() , __lowercase )
if __name__ == "__main__":
_lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
_lowerCamelCase : Any = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 686 |
import random
from .binary_exp_mod import bin_exp_mod
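# Probabilistic Miller-Rabin test: write n - 1 as d * 2**exp with d odd, then for `prec`
# random bases check whether the base witnesses compositeness.
# Illustrative expectations (not asserted anywhere): is_prime_big(97) -> True,
# is_prime_big(100) -> False.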
def a_ ( __lowercase : int , __lowercase : Any=1_000 ) -> int:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
_snake_case = n - 1
_snake_case = 0
while d % 2 == 0:
        d //= 2  # floor-divide so d stays an int for bin_exp_mod
        exp += 1
    # n - 1 = d * (2 ** exp)
_snake_case = 0
while count < prec:
_snake_case = random.randint(2 , n - 1 )
_snake_case = bin_exp_mod(__lowercase , __lowercase , __lowercase )
if b != 1:
_snake_case = True
for _ in range(__lowercase ):
if b == n - 1:
_snake_case = False
break
_snake_case = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
_lowerCamelCase : Tuple = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i))) | 686 | 1 |
# flake8: noqa
# Lint as: python3
_lowerCamelCase : Dict = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental | 686 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_lowerCamelCase : int = re.compile(r'''\s+''')
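# Exact-duplicate fingerprint: hash the file content with all whitespace stripped so
# formatting-only differences collapse to the same digest.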
def a_ ( __lowercase : List[Any] ) -> int:
return {"hash": hashlib.mda(re.sub(__lowercase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def a_ ( __lowercase : List[Any] ) -> Dict:
_snake_case = [len(__lowercase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(__lowercase ), "line_max": max(__lowercase )}
def a_ ( __lowercase : Optional[int] ) -> List[str]:
_snake_case = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] ) -> Optional[int]:
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def a_ ( __lowercase : Union[str, Any] , __lowercase : int=5 ) -> Optional[Any]:
_snake_case = ['auto-generated', 'autogenerated', 'automatically generated']
_snake_case = example['content'].splitlines()
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a_ ( __lowercase : List[Any] , __lowercase : int=5 , __lowercase : Tuple=0.0_5 ) -> Union[str, Any]:
_snake_case = ['unit tests', 'test file', 'configuration file']
_snake_case = example['content'].splitlines()
_snake_case = 0
_snake_case = 0
# first test
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_snake_case = example['content'].count('\n' )
_snake_case = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a_ ( __lowercase : Union[str, Any] ) -> Any:
_snake_case = ['def ', 'class ', 'for ', 'while ']
_snake_case = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a_ ( __lowercase : Tuple , __lowercase : Any=4 ) -> List[str]:
_snake_case = example['content'].splitlines()
_snake_case = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a_ ( __lowercase : Dict ) -> Dict:
_snake_case = tokenizer(example['content'] , truncation=__lowercase )['input_ids']
_snake_case = len(example['content'] ) / len(__lowercase )
return {"ratio": ratio}
def a_ ( __lowercase : Optional[Any] ) -> Any:
_snake_case = {}
results.update(get_hash(__lowercase ) )
results.update(line_stats(__lowercase ) )
results.update(alpha_stats(__lowercase ) )
results.update(char_token_ratio(__lowercase ) )
results.update(is_autogenerated(__lowercase ) )
results.update(is_config_or_test(__lowercase ) )
results.update(has_no_keywords(__lowercase ) )
results.update(has_few_assignments(__lowercase ) )
return results
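# An example survives only if it passes every heuristic below; note that `uniques` is
# consumed as a side effect, so each content hash is admitted at most once.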
def a_ ( __lowercase : Optional[int] , __lowercase : str , __lowercase : List[Any] ) -> int:
if not check_uniques(__lowercase , __lowercase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a_ ( __lowercase : Dict ) -> Dict:
with open(__lowercase , 'rb' ) as f_in:
with gzip.open(str(__lowercase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(__lowercase , __lowercase )
os.unlink(__lowercase )
# Settings
_lowerCamelCase : Dict = HfArgumentParser(PreprocessingArguments)
_lowerCamelCase : Dict = parser.parse_args()
if args.num_workers is None:
_lowerCamelCase : int = multiprocessing.cpu_count()
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_lowerCamelCase : Any = time.time()
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
_lowerCamelCase : Optional[int] = time.time()
_lowerCamelCase : Union[str, Any] = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
_lowerCamelCase : List[Any] = set(ds.unique('''hash'''))
_lowerCamelCase : Dict = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
_lowerCamelCase : List[Any] = time.time()
_lowerCamelCase : Optional[int] = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_lowerCamelCase : Union[str, Any] = time.time()
_lowerCamelCase , _lowerCamelCase : Dict = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
print(F'Size of deduplicate dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
_lowerCamelCase : Optional[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
_lowerCamelCase : int = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
_lowerCamelCase : Union[str, Any] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_lowerCamelCase : Dict = str(data_dir / F'file-{file_number+1:012}.json')
_lowerCamelCase : str = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}') | 686 | 1 |
from __future__ import annotations
import math
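# Depth-limited minimax over an implicit perfect binary tree: the leaves live in `scores`
# and node_index * 2 / node_index * 2 + 1 address a node's two children. For example,
# with scores [3, 5, 2, 9] and height 2 the maximizing root gets
# max(min(3, 5), min(2, 9)) == 3.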
def a_ ( __lowercase : int , __lowercase : int , __lowercase : bool , __lowercase : list[int] , __lowercase : float ) -> int:
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(__lowercase ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __lowercase , __lowercase , __lowercase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowercase , __lowercase , __lowercase ) , )
return min(
minimax(depth + 1 , node_index * 2 , __lowercase , __lowercase , __lowercase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowercase , __lowercase , __lowercase ) , )
def a_ ( ) -> None:
_snake_case = [90, 23, 6, 33, 21, 65, 123, 34_423]
_snake_case = math.log(len(__lowercase ) , 2 )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , __lowercase , __lowercase , __lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 686 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : int = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
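# YOLOS keeps a ViT-style encoder, so most fields mirror ViT; the extra fields configure
# the learned detection tokens plus the Hungarian-matcher costs and loss coefficients.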
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = "yolos"
def __init__( self : int , lowercase : List[str]=768 , lowercase : Tuple=12 , lowercase : int=12 , lowercase : int=3_072 , lowercase : Optional[int]="gelu" , lowercase : str=0.0 , lowercase : Optional[int]=0.0 , lowercase : Optional[Any]=0.02 , lowercase : List[str]=1E-12 , lowercase : Dict=[512, 864] , lowercase : Union[str, Any]=16 , lowercase : List[Any]=3 , lowercase : List[str]=True , lowercase : Optional[int]=100 , lowercase : int=True , lowercase : Dict=False , lowercase : str=1 , lowercase : int=5 , lowercase : Tuple=2 , lowercase : List[str]=5 , lowercase : Any=2 , lowercase : List[str]=0.1 , **lowercase : int , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = qkv_bias
_snake_case = num_detection_tokens
_snake_case = use_mid_position_embeddings
_snake_case = auxiliary_loss
# Hungarian matcher
_snake_case = class_cost
_snake_case = bbox_cost
_snake_case = giou_cost
# Loss coefficients
_snake_case = bbox_loss_coefficient
_snake_case = giou_loss_coefficient
_snake_case = eos_coefficient
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
@property
def A ( self : str ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Any ):
'''simple docstring'''
return 1E-4
@property
def A ( self : List[Any] ):
'''simple docstring'''
return 12 | 686 | 1 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( __lowercase : int , __lowercase : Union[str, Any] , __lowercase : str ) -> int:
# Initialise PyTorch model
_snake_case = MobileBertConfig.from_json_file(__lowercase )
print(f'''Building PyTorch model from configuration: {config}''' )
_snake_case = MobileBertForPreTraining(__lowercase )
# Load weights from tf checkpoint
_snake_case = load_tf_weights_in_mobilebert(__lowercase , __lowercase , __lowercase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __lowercase )
if __name__ == "__main__":
_lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCamelCase : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path) | 686 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_lowerCamelCase : Tuple = logging.get_logger(__name__)
# General docstring
_lowerCamelCase : Union[str, Any] = '''ResNetConfig'''
# Base docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[Any] = [1, 2_048, 7, 7]
# Image classification docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[int] = '''tiger cat'''
_lowerCamelCase : str = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
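# The blocks below compose bottom-up: conv layer -> basic/bottleneck residual layer ->
# stage (a stack of layers) -> encoder (a stack of stages), mirroring the ResNet paper.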
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 3 , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(
lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=kernel_size // 2 , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def A ( self : Union[str, Any] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def A ( self : Tuple , lowercase : Tensor ):
'''simple docstring'''
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_snake_case = self.embedder(lowercase )
_snake_case = self.pooler(lowercase )
return embedding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase : int , lowercase : int , lowercase : int = 2 ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(lowercase , lowercase , kernel_size=1 , stride=lowercase , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : List[str] , lowercase : List[str] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" , lowercase : int = 4 ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , kernel_size=1 ) , ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , kernel_size=1 , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : Dict , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
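# A stage downsamples exactly once (stride 2 in its first layer, unless configured
# otherwise) and then stacks depth - 1 stride-1 layers at constant width.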
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowercase : ResNetConfig , lowercase : int , lowercase : int , lowercase : int = 2 , lowercase : int = 2 , ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , stride=lowercase , activation=config.hidden_act ) , *[layer(lowercase , lowercase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase , config.depths[1:] ):
self.stages.append(ResNetStage(lowercase , lowercase , lowercase , depth=lowercase ) )
def A ( self : str , lowercase : Tensor , lowercase : bool = False , lowercase : bool = True ):
'''simple docstring'''
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowercase )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowercase , hidden_states=lowercase , )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ResNetConfig
_UpperCAmelCase : Tuple = "resnet"
_UpperCAmelCase : Optional[Any] = "pixel_values"
_UpperCAmelCase : Dict = True
def A ( self : List[str] , lowercase : Dict ):
'''simple docstring'''
if isinstance(lowercase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowercase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def A ( self : Tuple , lowercase : List[Any] , lowercase : Optional[Any]=False ):
'''simple docstring'''
if isinstance(lowercase , lowercase ):
_snake_case = value
_lowerCamelCase : str = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCamelCase : int = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : Any ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
_snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self : Union[str, Any] , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowercase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase : int ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowercase )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self : Union[str, Any] , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[torch.LongTensor] = None , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowercase )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowercase , lowercase )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowercase , lowercase )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
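# --- Illustrative aside (not part of the original file) ---
# Standalone sketch of the problem_type dispatch above, with made-up shapes:
# integer class labels pair with CrossEntropyLoss (single-label), float
# multi-hot labels with BCEWithLogitsLoss (multi-label), and a single logit
# per example with MSELoss (regression).
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

_logits = torch.randn(4, 3)
_single = CrossEntropyLoss()(_logits.view(-1, 3), torch.tensor([0, 2, 1, 0]).view(-1))
_multi = BCEWithLogitsLoss()(_logits, torch.randint(0, 2, (4, 3)).float())
_regress = MSELoss()(torch.randn(4, 1).squeeze(), torch.randn(4, 1).squeeze())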
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowercase : Union[str, Any] ):
'''simple docstring'''
super().__init__(lowercase )
super()._init_backbone(lowercase )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@replace_return_docstrings(output_type=lowercase , config_class=_CONFIG_FOR_DOC )
def A ( self : Dict , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowercase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowercase , ) | 686 | 1 |
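# --- Illustrative aside (not part of the original files) ---
# Pure-Python sketch of the feature-map filtering in the backbone forward
# above: only hidden states whose stage name appears in out_features are kept.
_stage_names = ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
_hidden_states = [f'feature_map_{i}' for i in range(len(_stage_names))]  # stand-ins for tensors
_out_features = ['stage2', 'stage3', 'stage4']
_feature_maps = tuple(h for h, s in zip(_hidden_states, _stage_names) if s in _out_features)
assert _feature_maps == ('feature_map_2', 'feature_map_3', 'feature_map_4')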
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def a_ ( ) -> Optional[int]:
_snake_case , _snake_case = 9, 14 # noqa: F841
_snake_case = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_snake_case = defaultdict(__lowercase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
_snake_case = mst(__lowercase )
_snake_case = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_snake_case = tuple(answer[:2] )
_snake_case = tuple(edge[::-1] )
assert edge in result or reverse in result | 686 |
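# --- Illustrative aside (not part of the original test) ---
# The assertion above accepts either node order because the graph is
# undirected: the MST may report edge (u, v) as (v, u).
_edge = (7, 6)
assert tuple(_edge[::-1]) == (6, 7)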
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 1 |
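# --- Illustrative aside (not part of the original file) ---
# The file above only builds a mapping from submodule names to exported
# symbols and hands it to _LazyModule; the heavy imports then run the first
# time an attribute is accessed, keeping the package import cheap. Minimal
# shape of such a mapping (contents abbreviated from the file above):
_import_structure_sketch = {
    'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig'],
    'modeling_focalnet': ['FocalNetModel', 'FocalNetPreTrainedModel'],  # abbreviated
}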
from __future__ import annotations
from math import pi
def a_ ( __lowercase : float , __lowercase : float , __lowercase : float ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 |
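# --- Illustrative aside (not part of the original file) ---
# Worked example of the relation implemented above, X_L = 2 * pi * f * L:
# with L = 35 mH and f = 1000 Hz, X_L = 70 * pi, roughly 219.91 ohms.
from math import isclose, pi

assert isclose(2 * pi * 1_000 * 35E-3, 70 * pi)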
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def a_ ( __lowercase : Union[str, Any] ) -> List[Any]:
_snake_case = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
_snake_case = DetaConfig(
backbone_config=__lowercase , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=__lowercase , with_box_refine=__lowercase , two_stage=__lowercase , )
# set labels
_snake_case = 'huggingface/label-files'
if "o365" in model_name:
_snake_case = 366
_snake_case = 'object365-id2label.json'
else:
_snake_case = 91
_snake_case = 'coco-detection-id2label.json'
_snake_case = num_labels
_snake_case = json.load(open(cached_download(hf_hub_url(__lowercase , __lowercase , repo_type='dataset' ) ) , 'r' ) )
_snake_case = {int(__lowercase ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
return config
def a_ ( __lowercase : int ) -> str:
_snake_case = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def a_ ( __lowercase : str , __lowercase : Tuple , __lowercase : str ) -> Union[str, Any]:
_snake_case = dct.pop(__lowercase )
_snake_case = val
def a_ ( __lowercase : List[str] , __lowercase : str ) -> Dict:
_snake_case = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_snake_case = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
_snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[:dim, :]
_snake_case = in_proj_bias[: dim]
_snake_case = in_proj_weight[
dim : dim * 2, :
]
_snake_case = in_proj_bias[
dim : dim * 2
]
_snake_case = in_proj_weight[
-dim :, :
]
_snake_case = in_proj_bias[-dim :]
# fmt: on
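# --- Illustrative aside (not part of the original script) ---
# The slicing above unpacks a fused attention projection: the checkpoint
# stores query/key/value as a single (3 * dim, dim) weight plus a (3 * dim,)
# bias, cut into three equal row blocks. Toy-sized sketch:
import torch

_dim = 4
_w = torch.randn(3 * _dim, _dim)
_q, _k, _v = _w[:_dim, :], _w[_dim : 2 * _dim, :], _w[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)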
def a_ ( __lowercase : Dict , __lowercase : Dict ) -> str:
# transformer decoder self-attention layers
_snake_case = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[:hidden_size, :]
_snake_case = in_proj_bias[:hidden_size]
_snake_case = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_snake_case = in_proj_bias[hidden_size : hidden_size * 2]
_snake_case = in_proj_weight[-hidden_size:, :]
_snake_case = in_proj_bias[-hidden_size:]
def a_ ( ) -> List[str]:
_snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_snake_case = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def a_ ( __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Tuple ) -> Optional[Any]:
_snake_case = get_deta_config(__lowercase )
# load original state dict
if model_name == "deta-swin-large":
_snake_case = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
_snake_case = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
_snake_case = torch.load(__lowercase , map_location='cpu' )['model']
    # inspect the original state dict: print every parameter name and shape
for name, param in state_dict.items():
print(__lowercase , param.shape )
# rename keys
_snake_case = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_swin_q_k_v(__lowercase , config.backbone_config )
read_in_decoder_q_k_v(__lowercase , __lowercase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
if "input_proj" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
# finally, create HuggingFace model and load state dict
_snake_case = DetaForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
_snake_case = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(__lowercase )
# load image processor
_snake_case = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
_snake_case = prepare_img()
_snake_case = processor(images=__lowercase , return_tensors='pt' )
_snake_case = encoding['pixel_values']
_snake_case = model(pixel_values.to(__lowercase ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_snake_case = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
_snake_case = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
_snake_case = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
_snake_case = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__lowercase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__lowercase ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'''jozhang97/{model_name}''' )
processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 686 | 1 |
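# --- Illustrative aside (not part of the original script) ---
# Typical invocation, assuming the script is saved as
# convert_deta_checkpoint.py (file name hypothetical; the flags match the
# argparse block above):
#
#   python convert_deta_checkpoint.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large \
#       --push_to_hub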
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = "resnet"
_UpperCAmelCase : Any = ["basic", "bottleneck"]
def __init__( self : Union[str, Any] , lowercase : Dict=3 , lowercase : Any=64 , lowercase : Any=[256, 512, 1_024, 2_048] , lowercase : Dict=[3, 4, 6, 3] , lowercase : Any="bottleneck" , lowercase : Optional[Any]="relu" , lowercase : Dict=False , lowercase : str=None , lowercase : Tuple=None , **lowercase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
_snake_case = num_channels
_snake_case = embedding_size
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = layer_type
_snake_case = hidden_act
_snake_case = downsample_in_first_stage
_snake_case = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase ) + 1 )]
_snake_case , _snake_case = get_aligned_output_features_output_indices(
out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names )
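# --- Illustrative aside (not part of the original file) ---
# How the stage names built above come out for the default depths=[3, 4, 6, 3]:
_depths = [3, 4, 6, 3]
assert ['stem'] + [f'stage{idx}' for idx in range(1, len(_depths) + 1)] == [
    'stem', 'stage1', 'stage2', 'stage3', 'stage4'
]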
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
@property
def A ( self : int ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return 1E-3 | 686 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_lowerCamelCase : Dict = '''pt'''
elif is_tf_available():
_lowerCamelCase : List[str] = '''tf'''
else:
_lowerCamelCase : List[Any] = '''jax'''
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = PerceiverTokenizer
_UpperCAmelCase : Optional[int] = False
def A ( self : Tuple ):
'''simple docstring'''
super().setUp()
_snake_case = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A ( self : str ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def A ( self : Optional[int] , **lowercase : Dict ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def A ( self : Optional[int] , lowercase : Tuple , lowercase : Optional[Any]=False , lowercase : int=20 , lowercase : Optional[int]=5 ):
'''simple docstring'''
_snake_case = []
for i in range(len(lowercase ) ):
try:
_snake_case = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_snake_case = list(filter(lambda lowercase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , lowercase ) )
_snake_case = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
_snake_case = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
_snake_case = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case = [t[0] for t in toks]
# Ensure consistency
_snake_case = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
_snake_case = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
_snake_case = ' ' + output_txt
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = 'Unicode €.'
_snake_case = tokenizer(lowercase )
_snake_case = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]Unicode €.[SEP]' )
_snake_case = tokenizer('e è é ê ë' )
_snake_case = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_snake_case = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
self.assertIsInstance(lowercase , lowercase )
if FRAMEWORK != "jax":
_snake_case = list(batch.input_ids.numpy()[0] )
else:
_snake_case = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowercase )
self.assertIn('attention_mask' , lowercase )
self.assertNotIn('decoder_input_ids' , lowercase )
self.assertNotIn('decoder_attention_mask' , lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = [
'Summary of the text.',
'Another summary.',
]
_snake_case = tokenizer(
text_target=lowercase , max_length=32 , padding='max_length' , truncation=lowercase , return_tensors=lowercase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
_snake_case = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_snake_case = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_snake_case = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
_snake_case = [f'''<extra_id_{i}>''' for i in range(125 )]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case = tokenizer_class.from_pretrained(
lowercase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowercase )]
_snake_case = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : List[str] ):
'''simple docstring'''
pass
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : int ):
'''simple docstring'''
_snake_case = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
_snake_case = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(lowercase , lowercase ) | 686 | 1 |
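# --- Illustrative aside (not part of the original tests) ---
# The expected ids above are consistent with byte-level tokenization offset
# by 6 to make room for special tokens (4 = [CLS], 5 = [SEP]): every UTF-8
# byte b maps to id b + 6.
_ids = [4] + [b + 6 for b in 'Unicode €.'.encode('utf-8')] + [5]
assert _ids == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]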
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_lowerCamelCase : Tuple = logging.get_logger(__name__)
# General docstring
_lowerCamelCase : Union[str, Any] = '''ResNetConfig'''
# Base docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[Any] = [1, 2_048, 7, 7]
# Image classification docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[int] = '''tiger cat'''
_lowerCamelCase : str = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 3 , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(
lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=kernel_size // 2 , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def A ( self : Union[str, Any] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def A ( self : Tuple , lowercase : Tensor ):
'''simple docstring'''
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
                'Make sure that the channel dimension of the pixel values matches the one set in the configuration.' )
_snake_case = self.embedder(lowercase )
_snake_case = self.pooler(lowercase )
return embedding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase : int , lowercase : int , lowercase : int = 2 ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(lowercase , lowercase , kernel_size=1 , stride=lowercase , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : List[str] , lowercase : List[str] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
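# --- Illustrative aside (not part of the original file) ---
# The forward above is the classic residual pattern: out = act(F(x) + shortcut(x)),
# with an identity shortcut whenever the shapes already match. Minimal sketch:
import torch

_x = torch.randn(1, 8, 4, 4)
_out = torch.relu((_x * 0.5) + _x)  # _x * 0.5 stands in for the conv stack; identity shortcut
assert _out.shape == _x.shape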
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" , lowercase : int = 4 ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , kernel_size=1 ) , ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , kernel_size=1 , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : Dict , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
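# --- Illustrative aside (not part of the original file) ---
# The bottleneck variant above narrows the channel count by `reduction`
# (default 4) with a 1x1 convolution, runs the 3x3 convolution at that
# reduced width, then expands back with another 1x1 convolution:
_out_channels, _reduction = 256, 4
assert _out_channels // _reduction == 64  # width of the middle 3x3 convolution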
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowercase : ResNetConfig , lowercase : int , lowercase : int , lowercase : int = 2 , lowercase : int = 2 , ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , stride=lowercase , activation=config.hidden_act ) , *[layer(lowercase , lowercase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase , config.depths[1:] ):
self.stages.append(ResNetStage(lowercase , lowercase , lowercase , depth=lowercase ) )
def A ( self : str , lowercase : Tensor , lowercase : bool = False , lowercase : bool = True ):
'''simple docstring'''
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowercase )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowercase , hidden_states=lowercase , )
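# --- Illustrative aside (not part of the original file) ---
# The encoder forward above optionally collects the hidden state before each
# stage and once after the last, so the caller receives the stem output plus
# one entry per stage:
_states, _state = (), 'embedding_output'
for _name in ['stage1', 'stage2']:
    _states += (_state,)
    _state = f'{_name}_output'
_states += (_state,)
assert _states == ('embedding_output', 'stage1_output', 'stage2_output')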
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ResNetConfig
_UpperCAmelCase : Tuple = "resnet"
_UpperCAmelCase : Optional[Any] = "pixel_values"
_UpperCAmelCase : Dict = True
def A ( self : List[str] , lowercase : Dict ):
'''simple docstring'''
if isinstance(lowercase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowercase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def A ( self : Tuple , lowercase : List[Any] , lowercase : Optional[Any]=False ):
'''simple docstring'''
if isinstance(lowercase , lowercase ):
_snake_case = value
_lowerCamelCase : str = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCamelCase : int = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : Any ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
_snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self : Union[str, Any] , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowercase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase : int ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowercase )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self : Union[str, Any] , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[torch.LongTensor] = None , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowercase )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowercase , lowercase )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowercase , lowercase )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowercase : Union[str, Any] ):
'''simple docstring'''
super().__init__(lowercase )
super()._init_backbone(lowercase )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@replace_return_docstrings(output_type=lowercase , config_class=_CONFIG_FOR_DOC )
def A ( self : Dict , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowercase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowercase , ) | 686 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def a_ ( ) -> Optional[int]:
_snake_case , _snake_case = 9, 14 # noqa: F841
_snake_case = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_snake_case = defaultdict(__lowercase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
_snake_case = mst(__lowercase )
_snake_case = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_snake_case = tuple(answer[:2] )
_snake_case = tuple(edge[::-1] )
assert edge in result or reverse in result | 686 | 1 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = DownBlockaD # noqa F405
_UpperCAmelCase : Union[str, Any] = "down"
def A ( self : Any ):
'''simple docstring'''
_snake_case = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(lowercase )
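# --- Illustrative aside (not part of the original tests) ---
# Pattern of the test classes below: each pins one UNet block type and a
# direction ('down', 'mid' or 'up'); test_output comes from
# UNetBlockTesterMixin (not shown here) and presumably compares a fixed
# nine-value slice of the block output against the hard-coded floats, which
# is also why a few classes skip MPS, where results are not deterministic.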
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : str = ResnetDownsampleBlockaD # noqa F405
_UpperCAmelCase : Union[str, Any] = "down"
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = AttnDownBlockaD # noqa F405
_UpperCAmelCase : Optional[int] = "down"
def A ( self : str ):
'''simple docstring'''
_snake_case = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[str] = CrossAttnDownBlockaD # noqa F405
_UpperCAmelCase : Any = "down"
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = super().prepare_init_args_and_inputs_for_common()
_snake_case = 32
return init_dict, inputs_dict
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = SimpleCrossAttnDownBlockaD # noqa F405
_UpperCAmelCase : Any = "down"
@property
def A ( self : Optional[int] ):
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def A ( self : int ):
'''simple docstring'''
_snake_case , _snake_case = super().prepare_init_args_and_inputs_for_common()
_snake_case = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def A ( self : Any ):
'''simple docstring'''
_snake_case = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = SkipDownBlockaD # noqa F405
_UpperCAmelCase : Optional[Any] = "down"
@property
def A ( self : str ):
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=lowercase )
def A ( self : int ):
'''simple docstring'''
_snake_case = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : str = AttnSkipDownBlockaD # noqa F405
_UpperCAmelCase : Optional[int] = "down"
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Any = DownEncoderBlockaD # noqa F405
_UpperCAmelCase : Optional[int] = "down"
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_temb=lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = {
'in_channels': 32,
'out_channels': 32,
}
_snake_case = self.dummy_input
return init_dict, inputs_dict
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = AttnDownEncoderBlockaD # noqa F405
_UpperCAmelCase : Optional[int] = "down"
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_temb=lowercase )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = {
'in_channels': 32,
'out_channels': 32,
}
_snake_case = self.dummy_input
return init_dict, inputs_dict
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Any = UNetMidBlockaD # noqa F405
_UpperCAmelCase : str = "mid"
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = {
'in_channels': 32,
'temb_channels': 128,
}
_snake_case = self.dummy_input
return init_dict, inputs_dict
def A ( self : Dict ):
'''simple docstring'''
_snake_case = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : int = UNetMidBlockaDCrossAttn # noqa F405
_UpperCAmelCase : Dict = "mid"
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case = super().prepare_init_args_and_inputs_for_common()
_snake_case = 32
return init_dict, inputs_dict
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = UNetMidBlockaDSimpleCrossAttn # noqa F405
_UpperCAmelCase : Any = "mid"
@property
def A ( self : Any ):
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def A ( self : int ):
'''simple docstring'''
_snake_case , _snake_case = super().prepare_init_args_and_inputs_for_common()
_snake_case = 32
return init_dict, inputs_dict
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = UpBlockaD # noqa F405
_UpperCAmelCase : Optional[int] = "up"
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[str] = ResnetUpsampleBlockaD # noqa F405
_UpperCAmelCase : str = "up"
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Any = CrossAttnUpBlockaD # noqa F405
_UpperCAmelCase : Optional[Any] = "up"
@property
def A ( self : Any ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def A ( self : Any ):
'''simple docstring'''
_snake_case , _snake_case = super().prepare_init_args_and_inputs_for_common()
_snake_case = 32
return init_dict, inputs_dict
def A ( self : str ):
'''simple docstring'''
_snake_case = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = SimpleCrossAttnUpBlockaD # noqa F405
_UpperCAmelCase : Optional[int] = "up"
@property
def A ( self : Any ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case , _snake_case = super().prepare_init_args_and_inputs_for_common()
_snake_case = 32
return init_dict, inputs_dict
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Tuple = AttnUpBlockaD # noqa F405
_UpperCAmelCase : int = "up"
@property
def A ( self : Optional[int] ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = SkipUpBlockaD # noqa F405
_UpperCAmelCase : int = "up"
@property
def A ( self : str ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = AttnSkipUpBlockaD # noqa F405
_UpperCAmelCase : Union[str, Any] = "up"
@property
def A ( self : Tuple ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[str] = UpDecoderBlockaD # noqa F405
_UpperCAmelCase : List[str] = "up"
@property
def A ( self : Dict ):
'''simple docstring'''
return super().get_dummy_input(include_temb=lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = {'in_channels': 32, 'out_channels': 32}
_snake_case = self.dummy_input
return init_dict, inputs_dict
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = AttnUpDecoderBlockaD # noqa F405
_UpperCAmelCase : List[str] = "up"
@property
def A ( self : Dict ):
'''simple docstring'''
return super().get_dummy_input(include_temb=lowercase )
def A ( self : int ):
'''simple docstring'''
_snake_case = {'in_channels': 32, 'out_channels': 32}
_snake_case = self.dummy_input
return init_dict, inputs_dict
def A ( self : Dict ):
'''simple docstring'''
_snake_case = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(lowercase ) | 686 |
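# A small sketch (outside the harness above) of the slice-comparison idiom these tests
# rely on: run a block once, take a fixed slice of the flattened output, and compare it
# against hard-coded expected values. Names and tolerance here are illustrative, not
# part of the suite.
import torch

def check_output_slice(output: torch.Tensor, expected_slice: list, atol: float = 5e-3) -> bool:
    # Compare the last 9 values of the flattened output against the recorded slice.
    output_slice = output.flatten()[-9:]
    return torch.allclose(output_slice, torch.tensor(expected_slice), atol=atol)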
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Tuple = ["transformers", "torch", "note_seq"]
def __init__( self : List[Any] , *lowercase : List[Any] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] ) | 686 | 1 |
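# A minimal sketch of the backend-guard idea behind the dummy class above: check that
# each required package is importable and raise a helpful ImportError otherwise. This
# illustrates the pattern only; it is not transformers' actual `requires_backends`.
import importlib.util

def requires_backends_sketch(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = getattr(obj, "__name__", type(obj).__name__)
        raise ImportError(f"{name} requires the missing backends: {', '.join(missing)}")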
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
_lowerCamelCase : Tuple = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def a_ ( __lowercase : Optional[int] ) -> Union[str, Any]:
_snake_case = torch.load(__lowercase , map_location='cpu' )
return sd
def a_ ( __lowercase : Union[str, Any] , __lowercase : Any , __lowercase : Union[str, Any]=rename_keys_prefix ) -> Union[str, Any]:
_snake_case = OrderedDict()
_snake_case = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_snake_case = key
for name_pair in rename_keys_prefix:
_snake_case = new_key.replace(name_pair[0] , name_pair[1] )
_snake_case = d[key]
if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`; it was added separately
_snake_case = new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def a_ ( __lowercase : Dict , __lowercase : Optional[Any] ) -> List[Any]:
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
_snake_case = 'pretraining'
if "vcr" in checkpoint_path:
_snake_case = {'visual_embedding_dim': 512}
elif "vqa_advanced" in checkpoint_path:
_snake_case = {'visual_embedding_dim': 2_048}
elif "vqa" in checkpoint_path:
_snake_case = {'visual_embedding_dim': 2_048}
elif "nlvr" in checkpoint_path:
_snake_case = {'visual_embedding_dim': 1_024}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
_snake_case = {'visual_embedding_dim': 512}
_snake_case = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
_snake_case = {'visual_embedding_dim': 2_048}
_snake_case = 'vqa_advanced'
elif "vqa" in checkpoint_path:
_snake_case = {'visual_embedding_dim': 2_048, 'num_labels': 3_129}
_snake_case = 'vqa'
elif "nlvr" in checkpoint_path:
_snake_case = {
'visual_embedding_dim': 1_024,
'num_labels': 2,
}
_snake_case = 'nlvr'
_snake_case = VisualBertConfig(**__lowercase )
# Load State Dict
_snake_case = load_state_dict(__lowercase )
_snake_case = get_new_dict(__lowercase , __lowercase )
if model_type == "pretraining":
_snake_case = VisualBertForPreTraining(__lowercase )
elif model_type == "vqa":
_snake_case = VisualBertForQuestionAnswering(__lowercase )
elif model_type == "nlvr":
_snake_case = VisualBertForVisualReasoning(__lowercase )
elif model_type == "multichoice":
_snake_case = VisualBertForMultipleChoice(__lowercase )
model.load_state_dict(__lowercase )
# Save Checkpoints
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
_lowerCamelCase : int = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path) | 686 |
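# Standalone sketch of the renaming step used by the conversion above: walk the
# checkpoint's state dict, drop detector weights, and apply ordered
# (old_prefix, new_prefix) substitutions such as ('bert.bert', 'visual_bert').
from collections import OrderedDict

def rename_state_dict_keys(state_dict, prefix_pairs):
    renamed = OrderedDict()
    for key, value in state_dict.items():
        if "detector" in key:
            continue  # detector weights are not part of the VisualBERT model
        new_key = key
        for old, new in prefix_pairs:
            new_key = new_key.replace(old, new)
        renamed[new_key] = value
    return renamed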
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a_ ( ) -> Optional[Any]:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(__lowercase ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def a_ ( ) -> Optional[int]:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def a_ ( ) -> Dict:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(__lowercase ):
http_head('https://huggingface.co' ) | 686 | 1 |
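# A rough sketch of how a CONNECTION_FAILS-style simulation can be implemented without
# the datasets test helpers: monkeypatch `requests.Session.request` so every HTTP call
# raises. This is an assumption about the mechanism, not the library's implementation.
from contextlib import contextmanager
from unittest.mock import patch

import requests

@contextmanager
def connection_fails():
    def raise_connection_error(self, method, url, *args, **kwargs):
        raise requests.exceptions.ConnectionError(f"Offline mode is enabled: {url}")
    with patch("requests.Session.request", raise_connection_error):
        yield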
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowerCamelCase : Tuple = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : str , *lowercase : Dict , **lowercase : int ):
'''simple docstring'''
super().__init__(*lowercase , **lowercase )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def A ( self : Optional[Any] , lowercase : Union[str, Any]=None ):
'''simple docstring'''
_snake_case = {}
if top_k is not None:
_snake_case = top_k
return {}, {}, postprocess_params
def __call__( self : Dict , lowercase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase : Union[str, Any] ):
'''simple docstring'''
return super().__call__(lowercase , **lowercase )
def A ( self : Dict , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = load_image(lowercase )
_snake_case = self.image_processor(images=lowercase , return_tensors=self.framework )
return model_inputs
def A ( self : Any , lowercase : Dict ):
'''simple docstring'''
_snake_case = self.model(**lowercase )
return model_outputs
def A ( self : Dict , lowercase : Union[str, Any] , lowercase : Dict=5 ):
'''simple docstring'''
if top_k > self.model.config.num_labels:
_snake_case = self.model.config.num_labels
if self.framework == "pt":
_snake_case = model_outputs.logits.softmax(-1 )[0]
_snake_case , _snake_case = probs.topk(lowercase )
elif self.framework == "tf":
_snake_case = stable_softmax(model_outputs.logits , axis=-1 )[0]
_snake_case = tf.math.top_k(lowercase , k=lowercase )
_snake_case , _snake_case = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
_snake_case = scores.tolist()
_snake_case = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase , lowercase )] | 686 |
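# Usage example for the pipeline above via the `pipeline` factory. The checkpoint name
# is illustrative; any image-classification checkpoint works, and the first call
# downloads the weights.
from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
predictions = classifier("path/to/image.jpg", top_k=3)  # placeholder image path
# `predictions` is a list of {"score": float, "label": str} dicts, best score first.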
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCamelCase : Optional[int] = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_lowerCamelCase : List[str] = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against one or more human-produced reference summaries or translations.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_lowerCamelCase : Dict = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
    references: list of references for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
        `"rougeLsum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def A ( self : Union[str, Any] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : int=None , lowercase : str=True , lowercase : List[str]=False ):
'''simple docstring'''
if rouge_types is None:
_snake_case = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
_snake_case = rouge_scorer.RougeScorer(rouge_types=lowercase , use_stemmer=lowercase )
if use_aggregator:
_snake_case = scoring.BootstrapAggregator()
else:
_snake_case = []
for ref, pred in zip(lowercase , lowercase ):
_snake_case = scorer.score(lowercase , lowercase )
if use_aggregator:
aggregator.add_scores(lowercase )
else:
scores.append(lowercase )
if use_aggregator:
_snake_case = aggregator.aggregate()
else:
_snake_case = {}
for key in scores[0]:
_snake_case = [score[key] for score in scores]
return result | 686 | 1 |
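# The metric above delegates to the `rouge_score` package; the same numbers can be
# reproduced directly with it, as in this short check:
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rouge1", "rouge2", "rougeL"], use_stemmer=True)
scores = scorer.score("hello there general kenobi", "hello there general kenobi")
print(scores["rouge1"].fmeasure)  # 1.0 for identical strings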
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_lowerCamelCase : Union[str, Any] = importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
_lowerCamelCase : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def a_ ( __lowercase : str ) -> str:
if "://" in dataset_path:
_snake_case = dataset_path.split('://' )[1]
return dataset_path
def a_ ( __lowercase : fsspec.AbstractFileSystem ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def a_ ( __lowercase : fsspec.AbstractFileSystem , __lowercase : str , __lowercase : str ) -> str:
_snake_case = not is_remote_filesystem(__lowercase )
if is_local:
        # LocalFileSystem.mv does copy + rm; it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(__lowercase ) , fs._strip_protocol(__lowercase ) )
else:
fs.mv(__lowercase , __lowercase , recursive=__lowercase )
def a_ ( ) -> None:
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_snake_case = None
_snake_case = None
_snake_case = threading.Lock() | 686 |
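# Self-contained sketch of the two path helpers above with descriptive names (the
# originals are the first two `a_` functions in this module):
import fsspec

def extract_path_from_uri_sketch(dataset_path: str) -> str:
    # "s3://bucket/data" -> "bucket/data"; local paths pass through unchanged.
    return dataset_path.split("://")[1] if "://" in dataset_path else dataset_path

def is_remote_filesystem_sketch(fs: fsspec.AbstractFileSystem) -> bool:
    # Anything that is not the local "file" protocol counts as remote here.
    return fs is not None and fs.protocol != "file"

assert extract_path_from_uri_sketch("s3://bucket/data") == "bucket/data"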
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = "swin2sr"
_UpperCAmelCase : Optional[int] = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[int] , lowercase : List[Any]=64 , lowercase : int=1 , lowercase : Union[str, Any]=3 , lowercase : Dict=180 , lowercase : List[Any]=[6, 6, 6, 6, 6, 6] , lowercase : Dict=[6, 6, 6, 6, 6, 6] , lowercase : List[Any]=8 , lowercase : List[str]=2.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : List[str]=False , lowercase : List[Any]=0.02 , lowercase : List[Any]=1E-5 , lowercase : Optional[int]=2 , lowercase : Tuple=1.0 , lowercase : List[Any]="1conv" , lowercase : List[Any]="pixelshuffle" , **lowercase : List[str] , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = embed_dim
_snake_case = depths
_snake_case = len(lowercase )
_snake_case = num_heads
_snake_case = window_size
_snake_case = mlp_ratio
_snake_case = qkv_bias
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = drop_path_rate
_snake_case = hidden_act
_snake_case = use_absolute_embeddings
_snake_case = layer_norm_eps
_snake_case = initializer_range
_snake_case = upscale
_snake_case = img_range
_snake_case = resi_connection
_snake_case = upsampler | 686 | 1 |
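# Short usage sketch, assuming a transformers release that exports this configuration
# as `Swin2SRConfig`:
from transformers import Swin2SRConfig

config = Swin2SRConfig(upscale=4, upsampler="pixelshuffle")
print(config.embed_dim, config.num_layers)  # 180 6 with the defaults above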
import pytest
_lowerCamelCase : Dict = '''__dummy_dataset1__'''
_lowerCamelCase : Optional[int] = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def a_ ( ) -> List[str]:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def a_ ( ) -> Tuple:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def a_ ( __lowercase : List[str] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Optional[int]:
_snake_case = dataset_loading_script_name
_snake_case = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=__lowercase )
_snake_case = script_dir / f'''{script_name}.py'''
with open(__lowercase , 'w' ) as f:
f.write(__lowercase )
return str(__lowercase ) | 686 |
import random
def a_ ( __lowercase : str , __lowercase : Any , __lowercase : Any ) -> Optional[Any]:
_snake_case = a[left_index]
_snake_case = left_index + 1
for j in range(left_index + 1 , __lowercase ):
if a[j] < pivot:
_snake_case , _snake_case = a[i], a[j]
i += 1
_snake_case , _snake_case = a[i - 1], a[left_index]
return i - 1
def a_ ( __lowercase : Union[str, Any] , __lowercase : str , __lowercase : Optional[int] ) -> Tuple:
if left < right:
_snake_case = random.randint(__lowercase , right - 1 )
_snake_case , _snake_case = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
_snake_case = partition(__lowercase , __lowercase , __lowercase )
quick_sort_random(
__lowercase , __lowercase , __lowercase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
__lowercase , pivot_index + 1 , __lowercase ) # recursive quicksort to the right of the pivot point
def a_ ( ) -> str:
_snake_case = input('Enter numbers separated by a comma:\n' ).strip()
_snake_case = [int(__lowercase ) for item in user_input.split(',' )]
quick_sort_random(__lowercase , 0 , len(__lowercase ) )
print(__lowercase )
if __name__ == "__main__":
main() | 686 | 1 |
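# The same randomized quicksort with descriptive names, as a self-contained reference
# sketch (right bound exclusive, pivot chosen uniformly at random):
import random

def quicksort_random(items: list, left: int, right: int) -> None:
    if left >= right - 1:  # slices of length 0 or 1 are already sorted
        return
    pivot_pos = random.randint(left, right - 1)
    items[left], items[pivot_pos] = items[pivot_pos], items[left]  # pivot to the front
    pivot, boundary = items[left], left + 1
    for j in range(left + 1, right):
        if items[j] < pivot:
            items[boundary], items[j] = items[j], items[boundary]
            boundary += 1
    items[left], items[boundary - 1] = items[boundary - 1], items[left]
    quicksort_random(items, left, boundary - 1)   # elements below the pivot
    quicksort_random(items, boundary, right)      # elements above the pivot

data = [3, 1, 4, 1, 5, 9, 2, 6]
quicksort_random(data, 0, len(data))
assert data == [1, 1, 2, 3, 4, 5, 6, 9]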
from statistics import mean, stdev
def a_ ( __lowercase : list , __lowercase : int = 3 ) -> list:
_snake_case = min(__lowercase )
_snake_case = max(__lowercase )
# normalize data
return [round((x - x_min) / (x_max - x_min) , __lowercase ) for x in data]
def a_ ( __lowercase : list , __lowercase : int = 3 ) -> list:
_snake_case = mean(__lowercase )
_snake_case = stdev(__lowercase )
# standardize data
return [round((x - mu) / (sigma) , __lowercase ) for x in data] | 686 |
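# The same rescalers as a reference sketch with descriptive names and a worked example:
from statistics import mean, stdev

def min_max_normalize(data: list, ndigits: int = 3) -> list:
    low, high = min(data), max(data)
    return [round((x - low) / (high - low), ndigits) for x in data]

def z_score_standardize(data: list, ndigits: int = 3) -> list:
    mu, sigma = mean(data), stdev(data)  # sample standard deviation
    return [round((x - mu) / sigma, ndigits) for x in data]

assert min_max_normalize([10.0, 20.0, 30.0, 40.0, 50.0]) == [0.0, 0.25, 0.5, 0.75, 1.0]
assert z_score_standardize([10.0, 20.0, 30.0, 40.0, 50.0]) == [-1.265, -0.632, 0.0, 0.632, 1.265]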
import math
def a_ ( __lowercase : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__lowercase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a_ ( __lowercase : float = 0.1 ) -> int:
_snake_case = 3
_snake_case = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__lowercase )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 1 |
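# What the search above computes, rewritten with descriptive names: walk the diagonals
# of an Ulam-style number spiral, add the three new non-square corners of each ring,
# and stop once the fraction of primes along the diagonals drops below `ratio`
# (Project Euler problem 58). A self-contained readable equivalent:
import math

def is_prime_sketch(n: int) -> bool:
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3
    if n % 2 == 0 or n % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(n)) + 1, 6):  # trial division by 6k +/- 1
        if n % i == 0 or n % (i + 2) == 0:
            return False
    return True

def spiral_diagonal_side_length(ratio: float = 0.1) -> int:
    primes, side = 3, 3  # the ring of side 3 contributes the primes 3, 5 and 7
    while primes / (2 * side - 1) >= ratio:  # 2 * side - 1 numbers lie on the diagonals
        # corners of the next ring (side + 2), excluding the square (side + 2) ** 2
        for corner in range(side * side + side + 1, (side + 2) ** 2, side + 1):
            primes += is_prime_sketch(corner)
        side += 2
    return side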
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
_lowerCamelCase : Dict = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
_lowerCamelCase : Tuple = json.load(f)
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[str] , lowercase : Optional[int] ):
'''simple docstring'''
return FSMTTokenizer.from_pretrained(lowercase )
def A ( self : Dict , lowercase : Any ):
'''simple docstring'''
_snake_case = FSMTForConditionalGeneration.from_pretrained(lowercase ).to(lowercase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def A ( self : Optional[int] , lowercase : Any , lowercase : int ):
'''simple docstring'''
_snake_case = f'''facebook/wmt19-{pair}'''
_snake_case = self.get_tokenizer(lowercase )
_snake_case = self.get_model(lowercase )
_snake_case = bleu_data[pair]['src']
_snake_case = bleu_data[pair]['tgt']
_snake_case = tokenizer(lowercase , return_tensors='pt' , truncation=lowercase , padding='longest' ).to(lowercase )
_snake_case = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
_snake_case = tokenizer.batch_decode(
lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
_snake_case = calculate_bleu(lowercase , lowercase )
print(lowercase )
self.assertGreaterEqual(scores['bleu'] , lowercase ) | 686 |
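# `calculate_bleu` comes from the local example utils; a rough standalone equivalent
# built on sacrebleu (which those utils wrap) would be:
import sacrebleu

def calculate_bleu_sketch(output_lns, refs_lns):
    return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}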
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = "resnet"
_UpperCAmelCase : Any = ["basic", "bottleneck"]
def __init__( self : Union[str, Any] , lowercase : Dict=3 , lowercase : Any=64 , lowercase : Any=[256, 512, 1_024, 2_048] , lowercase : Dict=[3, 4, 6, 3] , lowercase : Any="bottleneck" , lowercase : Optional[Any]="relu" , lowercase : Dict=False , lowercase : str=None , lowercase : Tuple=None , **lowercase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
_snake_case = num_channels
_snake_case = embedding_size
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = layer_type
_snake_case = hidden_act
_snake_case = downsample_in_first_stage
_snake_case = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase ) + 1 )]
_snake_case , _snake_case = get_aligned_output_features_output_indices(
out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
@property
def A ( self : int ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return 1E-3 | 686 | 1 |
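# Short usage sketch: build the configuration above and a randomly initialised
# backbone from it (requires torch; the depths shown match ResNet-50):
from transformers import ResNetConfig, ResNetModel

config = ResNetConfig(layer_type="bottleneck", depths=[3, 4, 6, 3])
model = ResNetModel(config)
print(sum(p.numel() for p in model.parameters()))  # parameter count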
def a_ ( __lowercase : List[Any] ) -> Dict:
_snake_case = []
_snake_case = set({'(', '[', '{'} )
_snake_case = set({')', ']', '}'} )
_snake_case = {'{': '}', '[': ']', '(': ')'}
for i in range(len(__lowercase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(__lowercase ) == 0 or (len(__lowercase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(__lowercase ) == 0
def a_ ( ) -> Dict:
_snake_case = input('Enter sequence of brackets: ' )
if is_balanced(__lowercase ):
print(__lowercase , 'is balanced' )
else:
print(__lowercase , 'is not balanced' )
if __name__ == "__main__":
main() | 686 |
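# Self-contained sketch of the stack check above with descriptive names:
def is_balanced_sketch(s: str) -> bool:
    pairs = {"(": ")", "[": "]", "{": "}"}
    stack = []
    for ch in s:
        if ch in pairs:
            stack.append(ch)
        elif ch in pairs.values():
            if not stack or pairs[stack.pop()] != ch:
                return False
    return not stack  # leftover openers mean the sequence is unbalanced

assert is_balanced_sketch("{[()]}")
assert not is_balanced_sketch("{[(])}")
assert not is_balanced_sketch("(((")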
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[Any] , lowercase : Union[str, Any] , lowercase : int ):
'''simple docstring'''
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(lowercase ) for s in shape] )}.npy'''
def A ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def A ( self : List[Any] , lowercase : Tuple=0 , lowercase : Optional[int]=(4, 4, 64, 64) , lowercase : Optional[int]=False ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) , dtype=lowercase )
return image
def A ( self : Tuple , lowercase : Any=False , lowercase : Union[str, Any]="CompVis/stable-diffusion-v1-4" ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = 'bf16' if fpaa else None
_snake_case , _snake_case = FlaxUNetaDConditionModel.from_pretrained(
lowercase , subfolder='unet' , dtype=lowercase , revision=lowercase )
return model, params
def A ( self : Union[str, Any] , lowercase : str=0 , lowercase : Optional[Any]=(4, 77, 768) , lowercase : int=False ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) , dtype=lowercase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def A ( self : Tuple , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=lowercase )
_snake_case = self.get_latents(lowercase , fpaa=lowercase )
_snake_case = self.get_encoder_hidden_states(lowercase , fpaa=lowercase )
_snake_case = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
_snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case = jnp.array(lowercase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def A ( self : str , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : List[str] ):
'''simple docstring'''
_snake_case , _snake_case = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=lowercase )
_snake_case = self.get_latents(lowercase , shape=(4, 4, 96, 96) , fpaa=lowercase )
_snake_case = self.get_encoder_hidden_states(lowercase , shape=(4, 77, 1_024) , fpaa=lowercase )
_snake_case = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
_snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case = jnp.array(lowercase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1E-2 ) | 686 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Union[str, Any] = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Any = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_lowerCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 |
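# `_LazyModule` defers every submodule import until first attribute access. A minimal
# sketch of the same idea using a PEP 562 module-level __getattr__ (illustrative only;
# the transformers implementation carries far more bookkeeping):
import importlib

_LAZY_ATTRS = {"BertModel": "modeling_bert", "BertTokenizer": "tokenization_bert"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(f".{_LAZY_ATTRS[name]}", __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")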
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a_ ( __lowercase : Any ) -> List[Any]:
_snake_case = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a_ ( __lowercase : Dict ) -> Tuple:
_snake_case , _snake_case = emb.weight.shape
_snake_case = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_snake_case = emb.weight.data
return lin_layer
def a_ ( __lowercase : Optional[int] , __lowercase : Union[str, Any]=None ) -> Tuple:
_snake_case = {}
for old_key in state_dict.keys():
_snake_case = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
_snake_case = key.replace('moe_layer.experts.0' , f'''ffn.experts.expert_{expert_idx}''' )
else:
_snake_case = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
if "gate" in key:
_snake_case = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
        if "fc2" in key and "experts" not in key:
            _snake_case = key.replace('.fc2.' , '.ffn.fc2.' )
        if "fc1" in key and "experts" not in key:
            _snake_case = key.replace('.fc1.' , '.ffn.fc1.' )
if ".encoder_attn." in key:
_snake_case = key.replace('.encoder_attn.' , '.cross_attention.' )
if "encoder_attn_layer_norm" in key:
_snake_case = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
if "final_layer_norm" in key:
_snake_case = key.replace('final_layer_norm' , 'ff_layer_norm' )
_snake_case = state_dict[old_key]
return new_dict
def a_ ( __lowercase : Optional[Any] , __lowercase : Tuple , __lowercase : Any , __lowercase : List[str] , __lowercase : str = WEIGHTS_NAME ) -> Union[str, Any]:
_snake_case = []
_snake_case = 0
os.makedirs(__lowercase , exist_ok=__lowercase )
for expert in range(__lowercase ):
_snake_case = switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(__lowercase ):
_snake_case = torch.load(__lowercase )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = os.path.join(
__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
torch.save(__lowercase , __lowercase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__lowercase )[0]].dtype )
# Add the last block
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
_snake_case = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = shared_weights['decoder.embed_tokens.weight']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__lowercase ) == 1:
_snake_case = os.path.join(__lowercase , __lowercase )
torch.save(__lowercase , __lowercase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__lowercase , __lowercase )
# Otherwise, let's build the index
_snake_case = {}
for idx, shard in enumerate(__lowercase ):
_snake_case = weights_name.replace('.bin' , f'''-{idx+1:05d}-of-{len(__lowercase ):05d}.bin''' )
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__lowercase , os.path.join(__lowercase , __lowercase ) )
for key in shard:
_snake_case = shard_file
# Add the metadata
_snake_case = {'total_size': total_size}
_snake_case = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(__lowercase , __lowercase ) , 'w' , encoding='utf-8' ) as f:
_snake_case = json.dumps(__lowercase , indent=2 , sort_keys=__lowercase ) + '\n'
f.write(__lowercase )
return metadata, index
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_lowerCamelCase : List[str] = parser.parse_args()
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_lowerCamelCase : Tuple = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_lowerCamelCase : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path) | 686 | 1 |
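# Shape of the index file written by the sharding above: a weight map from every
# parameter name to the shard that stores it, plus the total byte size. Values here
# are illustrative:
# {
#   "metadata": {"total_size": 220396347392},
#   "weight_map": {
#     "decoder.embed_tokens.weight": "pytorch_model-00001-of-00129.bin",
#     "encoder.layers.0.ffn.experts.expert_0.fc1.weight": "pytorch_model-00002-of-00129.bin",
#     ...
#   }
# }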
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
_lowerCamelCase : Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def A ( self : int , lowercase : Dict ):
'''simple docstring'''
if isinstance(lowercase , lowercase ):
_snake_case = [label.strip() for label in labels.split(',' ) if label.strip()]
return labels
def __call__( self : Optional[Any] , lowercase : Optional[Any] , lowercase : Any , lowercase : Optional[Any] ):
'''simple docstring'''
if len(lowercase ) == 0 or len(lowercase ) == 0:
raise ValueError('You must include at least one label and at least one sequence.' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
).format(lowercase ) )
if isinstance(lowercase , lowercase ):
_snake_case = [sequences]
_snake_case = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowercase : List[str]=ZeroShotClassificationArgumentHandler() , *lowercase : List[str] , **lowercase : Tuple ):
'''simple docstring'''
_snake_case = args_parser
super().__init__(*lowercase , **lowercase )
if self.entailment_id == -1:
logger.warning(
'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
'-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.' )
@property
def A ( self : List[Any] ):
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('entail' ):
return ind
return -1
def A ( self : Optional[Any] , lowercase : List[Any] , lowercase : List[Any]=True , lowercase : Union[str, Any]=True , lowercase : Optional[int]=TruncationStrategy.ONLY_FIRST , **lowercase : List[str] ):
'''simple docstring'''
_snake_case = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'Tokenizer was not supporting padding necessary for zero-shot, attempting to use '
' `pad_token=eos_token`' )
_snake_case = self.tokenizer.eos_token
try:
_snake_case = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , )
except Exception as e:
if "too short" in str(lowercase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
_snake_case = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def A ( self : Tuple , **lowercase : Tuple ):
'''simple docstring'''
if kwargs.get('multi_class' , lowercase ) is not None:
_snake_case = kwargs['multi_class']
logger.warning(
'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
'`multi_class` will be removed in a future version of Transformers.' )
_snake_case = {}
if "candidate_labels" in kwargs:
_snake_case = self._args_parser._parse_labels(kwargs['candidate_labels'] )
if "hypothesis_template" in kwargs:
_snake_case = kwargs['hypothesis_template']
_snake_case = {}
if "multi_label" in kwargs:
_snake_case = kwargs['multi_label']
return preprocess_params, {}, postprocess_params
def __call__( self : int , lowercase : Union[str, List[str]] , *lowercase : Any , **lowercase : List[str] , ):
'''simple docstring'''
if len(lowercase ) == 0:
pass
elif len(lowercase ) == 1 and "candidate_labels" not in kwargs:
_snake_case = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(lowercase , **lowercase )
def A ( self : List[Any] , lowercase : int , lowercase : Any=None , lowercase : Union[str, Any]="This example is {}." ):
'''simple docstring'''
_snake_case , _snake_case = self._args_parser(lowercase , lowercase , lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ):
_snake_case = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowercase ) - 1,
**model_input,
}
def A ( self : Optional[int] , lowercase : int ):
'''simple docstring'''
_snake_case = inputs['candidate_label']
_snake_case = inputs['sequence']
_snake_case = {k: inputs[k] for k in self.tokenizer.model_input_names}
_snake_case = self.model(**lowercase )
_snake_case = {
'candidate_label': candidate_label,
'sequence': sequence,
'is_last': inputs['is_last'],
**outputs,
}
return model_outputs
def A ( self : int , lowercase : List[Any] , lowercase : Optional[int]=False ):
'''simple docstring'''
_snake_case = [outputs['candidate_label'] for outputs in model_outputs]
_snake_case = [outputs['sequence'] for outputs in model_outputs]
_snake_case = np.concatenate([output['logits'].numpy() for output in model_outputs] )
_snake_case = logits.shape[0]
_snake_case = len(lowercase )
_snake_case = N // n
_snake_case = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowercase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
_snake_case = self.entailment_id
_snake_case = -1 if entailment_id == 0 else 0
_snake_case = reshaped_outputs[..., [contradiction_id, entailment_id]]
_snake_case = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
_snake_case = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
_snake_case = reshaped_outputs[..., self.entailment_id]
_snake_case = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
_snake_case = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
} | 686 |
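# Usage example for the pipeline above. The checkpoint name is illustrative; any NLI
# model whose config maps an "entailment" label works:
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "one day I will see the world",
    candidate_labels=["travel", "cooking", "dancing"],
)
# `result` is {"sequence": ..., "labels": [...], "scores": [...]}, best label first.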
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_lowerCamelCase : List[Any] = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_lowerCamelCase : Any = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_lowerCamelCase : Union[str, Any] = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
        Each prediction should be tokenized into a list of tokens.
    references: list of lists of references for each prediction.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def a_ ( __lowercase : List[Any] , __lowercase : Any ) -> Union[str, Any]:
return float((preds == labels).mean() )
def a_ ( __lowercase : Optional[Any] , __lowercase : List[str] ) -> Dict:
_snake_case = simple_accuracy(__lowercase , __lowercase )
_snake_case = float(fa_score(y_true=__lowercase , y_pred=__lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def a_ ( __lowercase : int , __lowercase : str ) -> str:
_snake_case = float(pearsonr(__lowercase , __lowercase )[0] )
_snake_case = float(spearmanr(__lowercase , __lowercase )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def A ( self : List[Any] , lowercase : List[str] , lowercase : Optional[Any] ):
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(lowercase , lowercase )}
elif self.config_name == "stsb":
return pearson_and_spearman(lowercase , lowercase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(lowercase , lowercase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(lowercase , lowercase )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) | 686 | 1 |
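# The same statistics computed directly from sklearn/scipy as a quick cross-check
# (`fa_score` imported above is sklearn's `f1_score`):
import numpy as np
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef

preds = np.array([0, 1, 1, 0])
labels = np.array([0, 1, 0, 0])
print(float((preds == labels).mean()))               # accuracy
print(float(f1_score(y_true=labels, y_pred=preds)))  # F1 (MRPC, QQP)
print(float(matthews_corrcoef(labels, preds)))       # Matthews correlation (CoLA)
print(float(pearsonr(preds, labels)[0]), float(spearmanr(preds, labels)[0]))  # STS-B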
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : List[Any] , lowercase : Union[str, Any] ):
'''simple docstring'''
if isinstance(lowercase , lowercase ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
_snake_case = deepcopy(lowercase )
elif os.path.exists(lowercase ):
with io.open(lowercase , 'r' , encoding='utf-8' ) as f:
_snake_case = json.load(lowercase )
else:
try:
_snake_case = baseaa.urlsafe_baadecode(lowercase ).decode('utf-8' )
_snake_case = json.loads(lowercase )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
_snake_case = config
self.set_stage_and_offload()
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.get_value('zero_optimization.stage' , -1 )
# offload
_snake_case = False
if self.is_zeroa() or self.is_zeroa():
_snake_case = set(['cpu', 'nvme'] )
_snake_case = set(
[
self.get_value('zero_optimization.offload_optimizer.device' ),
self.get_value('zero_optimization.offload_param.device' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
_snake_case = True
def A ( self : Tuple , lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = self.config
# find the config node of interest if it exists
_snake_case = ds_key_long.split('.' )
_snake_case = nodes.pop()
for node in nodes:
_snake_case = config.get(lowercase )
if config is None:
return None, ds_key
return config, ds_key
def A ( self : Tuple , lowercase : Optional[Any] , lowercase : Union[str, Any]=None ):
'''simple docstring'''
_snake_case , _snake_case = self.find_config_node(lowercase )
if config is None:
return default
return config.get(lowercase , lowercase )
def A ( self : Tuple , lowercase : Optional[Any] , lowercase : Optional[Any]=False ):
'''simple docstring'''
_snake_case = self.config
# find the config node of interest if it exists
_snake_case = ds_key_long.split('.' )
for node in nodes:
_snake_case = config
_snake_case = config.get(lowercase )
if config is None:
if must_exist:
raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(lowercase )
def A ( self : Optional[int] , lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = self.get_value(lowercase )
return False if value is None else bool(lowercase )
def A ( self : Tuple , lowercase : int ):
'''simple docstring'''
_snake_case = self.get_value(lowercase )
return False if value is None else not bool(lowercase )
def A ( self : Dict ):
'''simple docstring'''
return self._stage == 2
def A ( self : List[Any] ):
'''simple docstring'''
return self._stage == 3
def A ( self : Dict ):
'''simple docstring'''
return self._offload
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int ):
'''simple docstring'''
_snake_case = engine
def A ( self : int , lowercase : List[str] , **lowercase : List[Any] ):
'''simple docstring'''
self.engine.backward(lowercase , **lowercase )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , lowercase : List[str] ):
'''simple docstring'''
super().__init__(lowercase , device_placement=lowercase , scaler=lowercase )
_snake_case = hasattr(self.optimizer , 'overflow' )
def A ( self : Tuple , lowercase : str=None ):
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def A ( self : Any ):
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def A ( self : Dict ):
'''simple docstring'''
if self.__has_overflow__:
return self.optimizer.overflow
return False
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase : Dict , lowercase : Optional[int] ):
'''simple docstring'''
super().__init__(lowercase , lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : Optional[int] , lowercase : List[Any]=0.001 , lowercase : List[str]=0 , **lowercase : Tuple ):
'''simple docstring'''
_snake_case = params
_snake_case = lr
_snake_case = weight_decay
_snake_case = kwargs
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : List[str] , lowercase : Optional[int] , lowercase : Union[str, Any]=None , lowercase : Optional[int]=0 , **lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = optimizer
_snake_case = total_num_steps
_snake_case = warmup_num_steps
_snake_case = kwargs | 686 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_lowerCamelCase : Dict = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = "sequence-classification"
def __init__( self : Optional[int] , lowercase : Any ):
'''simple docstring'''
if type(lowercase ) == dict:
_snake_case = Namespace(**lowercase )
_snake_case = glue_output_modes[hparams.task]
_snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(lowercase , lowercase , self.mode )
def A ( self : Optional[Any] , **lowercase : Optional[Any] ):
'''simple docstring'''
return self.model(**lowercase )
def A ( self : Optional[Any] , lowercase : str , lowercase : Tuple ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case = outputs[0]
_snake_case = self.trainer.lr_schedulers[0]['scheduler']
_snake_case = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.hparams
_snake_case = processors[args.task]()
_snake_case = processor.get_labels()
for mode in ["train", "dev"]:
_snake_case = self._feature_file(lowercase )
if os.path.exists(lowercase ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , lowercase )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_snake_case = convert_examples_to_features(
lowercase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , lowercase )
torch.save(lowercase , lowercase )
def A ( self : Dict , lowercase : str , lowercase : int , lowercase : bool = False ):
'''simple docstring'''
_snake_case = 'dev' if mode == 'test' else mode
_snake_case = self._feature_file(lowercase )
logger.info('Loading features from cached file %s' , lowercase )
_snake_case = torch.load(lowercase )
_snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(lowercase , lowercase , lowercase , lowercase ) , batch_size=lowercase , shuffle=lowercase , )
def A ( self : str , lowercase : Optional[Any] , lowercase : str ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case , _snake_case = outputs[:2]
_snake_case = logits.detach().cpu().numpy()
_snake_case = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : int , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_snake_case = np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_snake_case = np.argmax(lowercase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_snake_case = np.squeeze(lowercase )
_snake_case = np.concatenate([x['target'] for x in outputs] , axis=0 )
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , lowercase , lowercase )}
_snake_case = dict(results.items() )
_snake_case = results
return ret, preds_list, out_label_list
def A ( self : int , lowercase : list ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : List[str] , lowercase : Any ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( lowercase : Tuple , lowercase : Any ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(lowercase , lowercase )
parser.add_argument(
'--max_seq_length' , default=128 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=lowercase , required=lowercase , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=lowercase , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def a_ ( ) -> Union[str, Any]:
_snake_case = argparse.ArgumentParser()
add_generic_args(__lowercase , os.getcwd() )
_snake_case = GLUETransformer.add_model_specific_args(__lowercase , os.getcwd() )
_snake_case = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_snake_case = os.path.join(
'./results' , f'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_snake_case = GLUETransformer(__lowercase )
_snake_case = generic_train(__lowercase , __lowercase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_snake_case = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=__lowercase ) )
_snake_case = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__lowercase )
if __name__ == "__main__":
main() | 686 | 1 |
from __future__ import annotations
def a_ ( __lowercase : float , __lowercase : float , __lowercase : float , ) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = LEDConfig
_UpperCAmelCase : int = {}
_UpperCAmelCase : List[str] = "gelu"
def __init__( self : Union[str, Any] , lowercase : Optional[int] , lowercase : Dict=13 , lowercase : Dict=7 , lowercase : Tuple=True , lowercase : Dict=False , lowercase : Dict=99 , lowercase : Any=32 , lowercase : List[Any]=2 , lowercase : List[str]=4 , lowercase : List[str]=37 , lowercase : Dict=0.1 , lowercase : int=0.1 , lowercase : List[Any]=20 , lowercase : int=2 , lowercase : Optional[Any]=1 , lowercase : List[str]=0 , lowercase : Optional[int]=4 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = eos_token_id
_snake_case = pad_token_id
_snake_case = bos_token_id
_snake_case = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case = prepare_led_inputs_dict(lowercase , lowercase , lowercase )
_snake_case = tf.concat(
[tf.zeros_like(lowercase )[:, :-1], tf.ones_like(lowercase )[:, -1:]] , axis=-1 , )
_snake_case = global_attention_mask
return config, inputs_dict
def A ( self : str , lowercase : str , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = TFLEDModel(config=lowercase ).get_decoder()
_snake_case = inputs_dict['input_ids']
_snake_case = input_ids[:1, :]
_snake_case = inputs_dict['attention_mask'][:1, :]
_snake_case = 1
# first forward pass
_snake_case = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
_snake_case , _snake_case = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case = model(lowercase , attention_mask=lowercase )[0]
_snake_case = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case = output_from_no_past[:, -3:, random_slice_idx]
_snake_case = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : str=None , ) -> Union[str, Any]:
if attention_mask is None:
_snake_case = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_snake_case = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_snake_case = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_UpperCAmelCase : Optional[int] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase : Tuple = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : str = True
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : str = False
_UpperCAmelCase : List[Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = TFLEDModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = tf.zeros_like(inputs_dict['attention_mask'] )
_snake_case = 2
_snake_case = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
_snake_case = True
_snake_case = self.model_tester.seq_length
_snake_case = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase : List[str] ):
_snake_case = outputs.decoder_attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase : List[str] ):
_snake_case = [t.numpy() for t in outputs.encoder_attentions]
_snake_case = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
_snake_case = len(lowercase )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
if self.is_encoder_decoder:
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_decoder_attentions_output(lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase ) )
self.assertEqual(model.config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def A ( self : List[Any] ):
'''simple docstring'''
pass
def A ( self : Any ):
'''simple docstring'''
pass
def a_ ( __lowercase : str ) -> Optional[Any]:
return tf.constant(__lowercase , dtype=tf.intaa )
_lowerCamelCase : List[Any] = 1E-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 )
def A ( self : str ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 , rtol=1E-3 ) | 686 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : int = {
'''facebook/deit-base-distilled-patch16-224''': (
'''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[str] = "deit"
def __init__( self : Optional[int] , lowercase : Optional[Any]=768 , lowercase : List[Any]=12 , lowercase : List[Any]=12 , lowercase : str=3_072 , lowercase : Union[str, Any]="gelu" , lowercase : Union[str, Any]=0.0 , lowercase : Any=0.0 , lowercase : Union[str, Any]=0.02 , lowercase : Optional[int]=1E-12 , lowercase : str=224 , lowercase : int=16 , lowercase : Tuple=3 , lowercase : List[str]=True , lowercase : List[str]=16 , **lowercase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = qkv_bias
_snake_case = encoder_stride
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = version.parse("1.11" )
@property
def A ( self : List[Any] ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Any ):
'''simple docstring'''
return 1E-4 | 686 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
_lowerCamelCase : Union[str, Any] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_lowerCamelCase : Union[str, Any] = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
_lowerCamelCase : Optional[int] = '''zero2'''
_lowerCamelCase : List[Any] = '''zero3'''
_lowerCamelCase : Dict = [ZEROa, ZEROa]
def a_ ( __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Tuple ) -> Dict:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
_snake_case = parameterized.to_safe_name('_'.join(str(__lowercase ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
_lowerCamelCase : Dict = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : List[str] , lowercase : List[Any] , lowercase : Dict ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : Any , lowercase : str , lowercase : List[str] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : List[str] , lowercase : Optional[Any] , lowercase : Optional[int] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
def A ( self : List[str] , lowercase : Optional[Any] ):
'''simple docstring'''
pass
def A ( self : str , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : bool = True , lowercase : bool = True , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = models[model]
_snake_case = self.run_trainer(
stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
self.do_checks(lowercase )
return output_dir
def A ( self : Any , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : int = 1 , lowercase : bool = True , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
_snake_case = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_snake_case = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_snake_case = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_snake_case = self.get_launcher(lowercase )
_snake_case = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase , env=self.get_env() )
return output_dir
def A ( self : List[str] , lowercase : Any=False ):
'''simple docstring'''
_snake_case = min(2 , get_gpu_count() ) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split() | 686 | 1 |
from math import factorial
def a_ ( __lowercase : int , __lowercase : int , __lowercase : float ) -> float:
if successes > trials:
raise ValueError('successes must be lower or equal to trials' )
if trials < 0 or successes < 0:
raise ValueError('the function is defined for non-negative integers' )
if not isinstance(__lowercase , __lowercase ) or not isinstance(__lowercase , __lowercase ):
raise ValueError('the function is defined for non-negative integers' )
if not 0 < prob < 1:
raise ValueError('prob has to be in range of 1 - 0' )
_snake_case = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_snake_case = float(factorial(__lowercase ) )
coefficient /= factorial(__lowercase ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.7_5)) | 686 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowerCamelCase : int = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowerCamelCase : Optional[int] = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : str = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 |
import random
from .binary_exp_mod import bin_exp_mod
def a_ ( __lowercase : int , __lowercase : Any=1_000 ) -> int:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
_snake_case = n - 1
_snake_case = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
_snake_case = 0
while count < prec:
_snake_case = random.randint(2 , n - 1 )
_snake_case = bin_exp_mod(__lowercase , __lowercase , __lowercase )
if b != 1:
_snake_case = True
for _ in range(__lowercase ):
if b == n - 1:
_snake_case = False
break
_snake_case = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
_lowerCamelCase : Tuple = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i))) | 686 | 1 |
def a_ ( __lowercase : int ) -> int:
if not isinstance(__lowercase , __lowercase ):
raise ValueError('Input must be an integer' )
if input_num <= 0:
raise ValueError('Input must be positive' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_lowerCamelCase : int = re.compile(r'''\s+''')
def a_ ( __lowercase : List[Any] ) -> int:
return {"hash": hashlib.mda(re.sub(__lowercase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def a_ ( __lowercase : List[Any] ) -> Dict:
_snake_case = [len(__lowercase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(__lowercase ), "line_max": max(__lowercase )}
def a_ ( __lowercase : Optional[int] ) -> List[str]:
_snake_case = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] ) -> Optional[int]:
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def a_ ( __lowercase : Union[str, Any] , __lowercase : int=5 ) -> Optional[Any]:
_snake_case = ['auto-generated', 'autogenerated', 'automatically generated']
_snake_case = example['content'].splitlines()
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a_ ( __lowercase : List[Any] , __lowercase : int=5 , __lowercase : Tuple=0.0_5 ) -> Union[str, Any]:
_snake_case = ['unit tests', 'test file', 'configuration file']
_snake_case = example['content'].splitlines()
_snake_case = 0
_snake_case = 0
# first test
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_snake_case = example['content'].count('\n' )
_snake_case = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a_ ( __lowercase : Union[str, Any] ) -> Any:
_snake_case = ['def ', 'class ', 'for ', 'while ']
_snake_case = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a_ ( __lowercase : Tuple , __lowercase : Any=4 ) -> List[str]:
_snake_case = example['content'].splitlines()
_snake_case = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a_ ( __lowercase : Dict ) -> Dict:
_snake_case = tokenizer(example['content'] , truncation=__lowercase )['input_ids']
_snake_case = len(example['content'] ) / len(__lowercase )
return {"ratio": ratio}
def a_ ( __lowercase : Optional[Any] ) -> Any:
_snake_case = {}
results.update(get_hash(__lowercase ) )
results.update(line_stats(__lowercase ) )
results.update(alpha_stats(__lowercase ) )
results.update(char_token_ratio(__lowercase ) )
results.update(is_autogenerated(__lowercase ) )
results.update(is_config_or_test(__lowercase ) )
results.update(has_no_keywords(__lowercase ) )
results.update(has_few_assignments(__lowercase ) )
return results
def a_ ( __lowercase : Optional[int] , __lowercase : str , __lowercase : List[Any] ) -> int:
if not check_uniques(__lowercase , __lowercase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a_ ( __lowercase : Dict ) -> Dict:
with open(__lowercase , 'rb' ) as f_in:
with gzip.open(str(__lowercase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(__lowercase , __lowercase )
os.unlink(__lowercase )
# Settings
_lowerCamelCase : Dict = HfArgumentParser(PreprocessingArguments)
_lowerCamelCase : Dict = parser.parse_args()
if args.num_workers is None:
_lowerCamelCase : int = multiprocessing.cpu_count()
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_lowerCamelCase : Any = time.time()
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
_lowerCamelCase : Optional[int] = time.time()
_lowerCamelCase : Union[str, Any] = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
_lowerCamelCase : List[Any] = set(ds.unique('''hash'''))
_lowerCamelCase : Dict = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
_lowerCamelCase : List[Any] = time.time()
_lowerCamelCase : Optional[int] = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_lowerCamelCase : Union[str, Any] = time.time()
_lowerCamelCase , _lowerCamelCase : Dict = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
print(F'Size of deduplicate dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
_lowerCamelCase : Optional[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
_lowerCamelCase : int = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
_lowerCamelCase : Union[str, Any] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_lowerCamelCase : Dict = str(data_dir / F'file-{file_number+1:012}.json')
_lowerCamelCase : str = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}') | 686 | 1 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_lowerCamelCase : Optional[int] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_lowerCamelCase : int = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_lowerCamelCase : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def a_ ( __lowercase : str , __lowercase : str ) -> tuple[str, float]:
_snake_case = len([g for position, g in enumerate(__lowercase ) if g == main_target[position]] )
return (item, float(__lowercase ))
def a_ ( __lowercase : str , __lowercase : str ) -> tuple[str, str]:
_snake_case = random.randint(0 , len(__lowercase ) - 1 )
_snake_case = parent_a[:random_slice] + parent_a[random_slice:]
_snake_case = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def a_ ( __lowercase : str , __lowercase : list[str] ) -> str:
_snake_case = list(__lowercase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
_snake_case = random.choice(__lowercase )
return "".join(__lowercase )
def a_ ( __lowercase : tuple[str, float] , __lowercase : list[tuple[str, float]] , __lowercase : list[str] , ) -> list[str]:
_snake_case = []
# Generate more children proportionally to the fitness score.
_snake_case = int(parent_a[1] * 100 ) + 1
_snake_case = 10 if child_n >= 10 else child_n
for _ in range(__lowercase ):
_snake_case = population_score[random.randint(0 , __lowercase )][0]
_snake_case , _snake_case = crossover(parent_a[0] , __lowercase )
# Append new string to the population list.
pop.append(mutate(__lowercase , __lowercase ) )
pop.append(mutate(__lowercase , __lowercase ) )
return pop
def a_ ( __lowercase : str , __lowercase : list[str] , __lowercase : bool = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
_snake_case = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(__lowercase )
# Verify that the target contains no genes besides the ones inside genes variable.
_snake_case = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_snake_case = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(__lowercase )
# Generate random starting population.
_snake_case = []
for _ in range(__lowercase ):
population.append(''.join([random.choice(__lowercase ) for i in range(len(__lowercase ) )] ) )
# Just some logs to know what the algorithms is doing.
_snake_case , _snake_case = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__lowercase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_snake_case = [evaluate(__lowercase , __lowercase ) for item in population]
# Check if there is a matching evolution.
_snake_case = sorted(__lowercase , key=lambda __lowercase : x[1] , reverse=__lowercase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'''\nGeneration: {generation}'''
f'''\nTotal Population:{total_population}'''
f'''\nBest score: {population_score[0][1]}'''
f'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_snake_case = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(__lowercase )
# Normalize population score to be between 0 and 1.
_snake_case = [
(item, score / len(__lowercase )) for item, score in population_score
]
# This is selection
for i in range(__lowercase ):
population.extend(select(population_score[int(__lowercase )] , __lowercase , __lowercase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(__lowercase ) > N_POPULATION:
break
if __name__ == "__main__":
_lowerCamelCase : int = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
_lowerCamelCase : Any = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[int] = basic(target_str, genes_list)
print(
F'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
) | 686 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : int = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = "yolos"
def __init__( self : int , lowercase : List[str]=768 , lowercase : Tuple=12 , lowercase : int=12 , lowercase : int=3_072 , lowercase : Optional[int]="gelu" , lowercase : str=0.0 , lowercase : Optional[int]=0.0 , lowercase : Optional[Any]=0.02 , lowercase : List[str]=1E-12 , lowercase : Dict=[512, 864] , lowercase : Union[str, Any]=16 , lowercase : List[Any]=3 , lowercase : List[str]=True , lowercase : Optional[int]=100 , lowercase : int=True , lowercase : Dict=False , lowercase : str=1 , lowercase : int=5 , lowercase : Tuple=2 , lowercase : List[str]=5 , lowercase : Any=2 , lowercase : List[str]=0.1 , **lowercase : int , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = qkv_bias
_snake_case = num_detection_tokens
_snake_case = use_mid_position_embeddings
_snake_case = auxiliary_loss
# Hungarian matcher
_snake_case = class_cost
_snake_case = bbox_cost
_snake_case = giou_cost
# Loss coefficients
_snake_case = bbox_loss_coefficient
_snake_case = giou_loss_coefficient
_snake_case = eos_coefficient
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
@property
def A ( self : str ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Any ):
'''simple docstring'''
return 1E-4
@property
def A ( self : List[Any] ):
'''simple docstring'''
return 12 | 686 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
_lowerCamelCase : Dict = ['''gpt2''']
_lowerCamelCase : Tuple = '''gpt2'''
if is_tf_available():
class SCREAMING_SNAKE_CASE__ ( tf.Module ):
'''simple docstring'''
def __init__( self : int , lowercase : Union[str, Any] ):
'''simple docstring'''
super().__init__()
_snake_case = tokenizer
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = TFGPTaLMHeadModel.from_config(lowercase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def A ( self : int , lowercase : Any ):
'''simple docstring'''
_snake_case = self.tokenizer(lowercase )
_snake_case = tokenized['input_ids'].to_tensor()
_snake_case = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_snake_case = self.model(input_ids=lowercase , attention_mask=lowercase )['logits']
return outputs
@require_tf
@require_keras_nlp
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : str ):
'''simple docstring'''
super().setUp()
_snake_case = [GPTaTokenizer.from_pretrained(lowercase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_snake_case = [TFGPTaTokenizer.from_pretrained(lowercase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_snake_case = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_snake_case = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def A ( self : List[str] ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
_snake_case = tokenizer([test_inputs] , return_tensors='tf' )
_snake_case = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_snake_case = python_outputs[key].numpy()
_snake_case = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(lowercase , tf.intaa ) == tf_outputs_values ) )
@slow
def A ( self : Optional[int] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case = tf.function(lowercase )
for test_inputs in self.test_sentences:
_snake_case = tf.constant(lowercase )
_snake_case = compiled_tokenizer(lowercase )
_snake_case = tf_tokenizer(lowercase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def A ( self : int ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case = ModelToSave(tokenizer=lowercase )
_snake_case = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case = model.serving(lowercase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_snake_case = Path(lowercase ) / 'saved.model'
tf.saved_model.save(lowercase , lowercase , signatures={'serving_default': model.serving} )
_snake_case = tf.saved_model.load(lowercase )
_snake_case = loaded_model.signatures['serving_default'](lowercase )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def A ( self : int ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case = tf_tokenizer(lowercase ) # Build model with some sample inputs
_snake_case = tf_tokenizer.get_config()
_snake_case = TFGPTaTokenizer.from_config(lowercase )
_snake_case = model_from_config(lowercase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_snake_case = 123_123
for max_length in [3, 5, 1_024]:
_snake_case = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case = tf_tokenizer(lowercase , max_length=lowercase )
_snake_case = out['input_ids'].numpy().shape[1]
assert out_length == max_length | 686 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = '''ResNetConfig'''
# Base docstring
_CHECKPOINT_FOR_DOC = '''microsoft/resnet-50'''
_EXPECTED_OUTPUT_SHAPE = [1, 2_048, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''microsoft/resnet-50'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tiger cat'''
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    '''microsoft/resnet-50''',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    '''
    Basic ResNet building block: convolution, batch normalization and an optional activation.
    '''

    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        # `padding = kernel_size // 2` preserves the spatial size when `stride == 1`; the bias is
        # omitted because batch normalization directly follows the convolution.
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """
    ResNet embeddings (stem) composed of a single aggressive convolution followed by max pooling.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """
    ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """
    A classic ResNet residual layer composed of two `3x3` convolutions.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
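
# Minimal sketch (not part of the original file): the basic layer computes
# act(layer(x) + shortcut(x)); when channels or stride change, the 1x1 ResNetShortCut
# projects the residual so the addition is well-defined.
_block = ResNetBasicLayer(in_channels=64, out_channels=128, stride=2)
_x = torch.randn(1, 64, 56, 56)
assert _block(_x).shape == (1, 128, 28, 28)  # stride=2 halves the spatial dimensions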
class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer composed of three convolutions. The first `1x1` convolution reduces the
    channels by a factor of `reduction` in order to make the `3x3` convolution cheaper, and the last `1x1`
    convolution remaps them to `out_channels`.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """
    A ResNet stage composed of stacked layers.
    """

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
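
# Minimal sketch (not part of the original file, assumes transformers' default
# ResNetConfig): with output_hidden_states=True the encoder returns the input of each
# stage plus the final output, i.e. len(config.depths) + 1 feature maps.
_cfg = ResNetConfig()
_enc_out = ResNetEncoder(_cfg)(torch.randn(1, _cfg.embedding_size, 56, 56), output_hidden_states=True)
print(len(_enc_out.hidden_states))  # 5 for the default 4-stage config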
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
RESNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
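
# Minimal sketch (not part of the original file) of the problem_type dispatch above:
# integer labels with num_labels > 1 select single-label classification, float label
# vectors select multi-label classification, and num_labels == 1 selects regression.
_logits = torch.randn(2, 3)
print(CrossEntropyLoss()(_logits.view(-1, 3), torch.tensor([0, 2])))                   # single-label
print(BCEWithLogitsLoss()(_logits, torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])))  # multi-label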
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
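
# End-to-end usage sketch (not part of the original file): classify an image with the
# classes defined above. Assumes the transformers AutoImageProcessor API and the public
# "microsoft/resnet-50" checkpoint; weights are downloaded on first run.
from PIL import Image
import requests
from transformers import AutoImageProcessor, ResNetForImageClassification

_image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
_model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
_logits = _model(**_processor(images=_image, return_tensors="pt")).logits
print(_model.config.id2label[_logits.argmax(-1).item()])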
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" RoBERTa tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
    tokenizer, using byte-level Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
        eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
        mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        """
        `str`: Mask token. Logs an error and returns None if used while not having been set.
        """
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        RoBERTa does not make use of token type ids, therefore a list of zeros is returned.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
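
# Usage sketch (not part of the original file): RoBERTa's byte-level BPE is
# space-sensitive, which is why the class above threads add_prefix_space through to the
# backend pre-tokenizer. "roberta-base" is the public checkpoint.
from transformers import RobertaTokenizerFast

_tok = RobertaTokenizerFast.from_pretrained("roberta-base")
print(_tok("Hello world")["input_ids"])  # <s> ... </s>, all token_type_ids are zero
_tok_ws = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
print(_tok_ws(["Hello", "world"], is_split_into_words=True)["input_ids"])  # pretokenized input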
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
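
# Minimal sketch (not part of the original file) of what the _LazyModule above buys:
# importing the package is cheap, and the torch-backed modeling module is only imported
# when one of its attributes is first accessed. Assumes a transformers version that
# ships FocalNet.
import importlib

_focalnet = importlib.import_module("transformers.models.focalnet")
_config = _focalnet.FocalNetConfig()  # attribute access triggers the real import
print(type(_config).__name__)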
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048,
        num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
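

# Minimal sketch (not part of the original script) of the fused-projection split that
# both read_in_*_q_k_v helpers above perform: a (3*h, h) qkv matrix is sliced row-wise
# into equal query / key / value blocks.
_h = 8
_qkv = torch.arange(3 * _h * _h, dtype=torch.float32).reshape(3 * _h, _h)
_q, _k, _v = _qkv[:_h, :], _qkv[_h : 2 * _h, :], _qkv[-_h:, :]
assert torch.equal(torch.cat([_q, _k, _v], dim=0), _qkv)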
def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original model's weights into our DETA structure.
    """
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # print the original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        type=str,
        default="deta-swin-large",
        choices=["deta-swin-large", "deta-swin-large-o365"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
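
# Example invocation sketch (the script filename is hypothetical; the arguments are the
# ones defined above):
#
#     python convert_deta_swin_to_pytorch.py \
#         --model_name deta-swin-large \
#         --pytorch_dump_folder_path ./deta-swin-large-converted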
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def A ( self : Tuple , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=lowercase )
_snake_case = self.get_latents(lowercase , fpaa=lowercase )
_snake_case = self.get_encoder_hidden_states(lowercase , fpaa=lowercase )
_snake_case = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
_snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case = jnp.array(lowercase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def A ( self : str , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : List[str] ):
'''simple docstring'''
_snake_case , _snake_case = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=lowercase )
_snake_case = self.get_latents(lowercase , shape=(4, 4, 96, 96) , fpaa=lowercase )
_snake_case = self.get_encoder_hidden_states(lowercase , shape=(4, 77, 1_024) , fpaa=lowercase )
_snake_case = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
_snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case = jnp.array(lowercase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1E-2 ) | 686 |
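
# A minimal sketch of the slice-comparison pattern the tests above repeat:
# take a small, deterministic corner slice of the output, flatten it, and
# compare against hard-coded reference values within a tolerance. The helper
# name is ours; only the slicing and tolerance logic mirrors the tests.
import jax.numpy as jnp

def check_output_slice(sample, expected, atol=1e-2):
    # sample: (batch, height, width, channels); compare a fixed corner slice
    got = jnp.asarray(sample[-1, -2:, -2:, :2].flatten(), dtype=jnp.float32)
    ref = jnp.array(expected, dtype=jnp.float32)
    assert jnp.allclose(got, ref, atol=atol)
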
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_lowerCamelCase : Dict = '''pt'''
elif is_tf_available():
_lowerCamelCase : List[str] = '''tf'''
else:
_lowerCamelCase : List[Any] = '''jax'''
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = PerceiverTokenizer
_UpperCAmelCase : Optional[int] = False
def A ( self : Tuple ):
'''simple docstring'''
super().setUp()
_snake_case = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A ( self : str ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def A ( self : Optional[int] , **lowercase : Dict ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def A ( self : Optional[int] , lowercase : Tuple , lowercase : Optional[Any]=False , lowercase : int=20 , lowercase : Optional[int]=5 ):
'''simple docstring'''
_snake_case = []
for i in range(len(lowercase ) ):
try:
_snake_case = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
        _snake_case = list(filter(lambda t : re.match(R'^[ a-zA-Z]+$' , t[1] ) , lowercase ) )
        _snake_case = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
_snake_case = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
_snake_case = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case = [t[0] for t in toks]
# Ensure consistency
_snake_case = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
_snake_case = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
_snake_case = ' ' + output_txt
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = 'Unicode €.'
_snake_case = tokenizer(lowercase )
_snake_case = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]Unicode €.[SEP]' )
_snake_case = tokenizer('e è é ê ë' )
_snake_case = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_snake_case = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
self.assertIsInstance(lowercase , lowercase )
if FRAMEWORK != "jax":
_snake_case = list(batch.input_ids.numpy()[0] )
else:
_snake_case = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowercase )
self.assertIn('attention_mask' , lowercase )
self.assertNotIn('decoder_input_ids' , lowercase )
self.assertNotIn('decoder_attention_mask' , lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = [
'Summary of the text.',
'Another summary.',
]
_snake_case = tokenizer(
text_target=lowercase , max_length=32 , padding='max_length' , truncation=lowercase , return_tensors=lowercase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
_snake_case = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_snake_case = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_snake_case = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
_snake_case = [f'''<extra_id_{i}>''' for i in range(125 )]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case = tokenizer_class.from_pretrained(
lowercase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowercase )]
_snake_case = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : List[str] ):
'''simple docstring'''
pass
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : int ):
'''simple docstring'''
_snake_case = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
_snake_case = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(lowercase , lowercase ) | 686 | 1 |
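
# The Perceiver tokenizer is byte-level: each id is a UTF-8 byte shifted by
# the number of special tokens. A standalone sketch assuming an offset of 6,
# which is consistent with the ids asserted above ('U' = 85 -> 91, 'n' = 110 -> 116):
def byte_level_encode(text, offset=6):
    return [b + offset for b in text.encode("utf-8")]

assert byte_level_encode("Unicode")[:2] == [91, 116]
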
import os
import sys
import unittest
_lowerCamelCase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_lowerCamelCase : Dict = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
_lowerCamelCase : Dict = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : int ):
'''simple docstring'''
_snake_case = get_test_to_tester_mapping(lowercase )
_snake_case = get_test_to_tester_mapping(lowercase )
_snake_case = {'BertModelTest': 'BertModelTester'}
_snake_case = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = get_model_to_test_mapping(lowercase )
_snake_case = get_model_to_test_mapping(lowercase )
_snake_case = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
_snake_case = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = get_model_to_tester_mapping(lowercase )
_snake_case = get_model_to_tester_mapping(lowercase )
_snake_case = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
_snake_case = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) | 686 |
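
# Conceptually, the mappings tested above pair each *ModelTest class in a
# test module with the *ModelTester it instantiates. A toy regex-based
# approximation of that idea (not the library's actual implementation):
import re

def toy_test_to_tester(source: str) -> dict:
    tests = re.findall(r"class (\w+ModelTest)\b", source)
    testers = re.findall(r"self\.model_tester = (\w+ModelTester)\(", source)
    return dict(zip(tests, testers))
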
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def a_ ( ) -> Optional[int]:
_snake_case , _snake_case = 9, 14 # noqa: F841
_snake_case = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_snake_case = defaultdict(__lowercase )
for nodea, nodea, cost in edges:
        adjacency[nodea].append([nodea, cost] )
        adjacency[nodea].append([nodea, cost] )
_snake_case = mst(__lowercase )
_snake_case = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_snake_case = tuple(answer[:2] )
_snake_case = tuple(edge[::-1] )
assert edge in result or reverse in result | 686 | 1 |
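
# For reference, a compact heap-based Prim's algorithm over the same
# adjacency-list format ({node: [[neighbor, cost], ...]}). This is a generic
# sketch, not the `mst` implementation imported above:
import heapq

def prim_mst(adjacency, start=0):
    visited, result, heap = {start}, [], []
    for neighbor, cost in adjacency[start]:
        heapq.heappush(heap, (cost, start, neighbor))
    while heap:
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        result.append([u, v, cost])
        for neighbor, next_cost in adjacency[v]:
            if neighbor not in visited:
                heapq.heappush(heap, (next_cost, v, neighbor))
    return result
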
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def A ( self : List[str] , lowercase : Union[str, Any]=0 ):
'''simple docstring'''
_snake_case = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowercase ) )
_snake_case = np.random.RandomState(lowercase )
_snake_case = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = self.get_dummy_inputs()
_snake_case = pipe(**lowercase ).images
_snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
_snake_case = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_snake_case = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = self.get_dummy_inputs()
_snake_case = pipe(**lowercase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_snake_case = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def A ( self : Any ):
'''simple docstring'''
_snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase )
# warmup pass to apply optimizations
_snake_case = pipe(**self.get_dummy_inputs() )
_snake_case = self.get_dummy_inputs()
_snake_case = pipe(**lowercase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_snake_case = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = self.get_dummy_inputs()
_snake_case = pipe(**lowercase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_snake_case = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def A ( self : str ):
'''simple docstring'''
_snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = self.get_dummy_inputs()
_snake_case = pipe(**lowercase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_snake_case = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = self.get_dummy_inputs()
_snake_case = pipe(**lowercase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_snake_case = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@property
def A ( self : Tuple ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def A ( self : Dict ):
'''simple docstring'''
_snake_case = ort.SessionOptions()
_snake_case = False
return options
def A ( self : Dict ):
'''simple docstring'''
_snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
_snake_case = init_image.resize((768, 512) )
# using the PNDM scheduler by default
_snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=lowercase , feature_extractor=lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = 'A fantasy landscape, trending on artstation'
_snake_case = np.random.RandomState(0 )
_snake_case = pipe(
prompt=lowercase , image=lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase , output_type='np' , )
_snake_case = output.images
_snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
_snake_case = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
_snake_case = init_image.resize((768, 512) )
_snake_case = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
_snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = 'A fantasy landscape, trending on artstation'
_snake_case = np.random.RandomState(0 )
_snake_case = pipe(
prompt=lowercase , image=lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase , output_type='np' , )
_snake_case = output.images
_snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
_snake_case = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 | 686 |
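
# The scheduler-swap pattern each test above repeats, factored into one
# helper: diffusers schedulers can be rebuilt from the current scheduler's
# config. A sketch assuming a pipeline object with a `.scheduler` attribute,
# as in the tests:
def swap_scheduler(pipe, scheduler_cls, **config_overrides):
    pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config, **config_overrides)
    return pipe
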
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Tuple = ["transformers", "torch", "note_seq"]
def __init__( self : List[Any] , *lowercase : List[Any] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] ) | 686 | 1 |
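
# The DummyObject/requires_backends pattern above lets imports succeed even
# when optional dependencies are missing, failing only on use. A stripped-down
# analogue of the idea (class and attribute names are ours):
class _MissingBackend(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires: {', '.join(cls._backends)}")

class MidiProcessorStub(metaclass=_MissingBackend):
    _backends = ["transformers", "torch", "note_seq"]
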
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCamelCase : Any = logging.get_logger(__name__)
# General docstring
_lowerCamelCase : Optional[Any] = '''RegNetConfig'''
# Base docstring
_lowerCamelCase : int = '''facebook/regnet-y-040'''
_lowerCamelCase : Optional[Any] = [1, 1_088, 7, 7]
# Image classification docstring
_lowerCamelCase : Dict = '''facebook/regnet-y-040'''
_lowerCamelCase : str = '''tabby, tabby cat'''
_lowerCamelCase : Dict = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase : int , lowercase : int = 3 , lowercase : int = 1 , lowercase : int = 1 , lowercase : Optional[str] = "relu" , **lowercase : str , ):
'''simple docstring'''
super().__init__(**lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_snake_case = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_snake_case = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=lowercase , strides=lowercase , padding='VALID' , groups=lowercase , use_bias=lowercase , name='convolution' , )
_snake_case = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
_snake_case = ACTaFN[activation] if activation is not None else tf.identity
def A ( self : str , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = self.convolution(self.padding(lowercase ) )
_snake_case = self.normalization(lowercase )
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : RegNetConfig , **lowercase : Any ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = config.num_channels
_snake_case = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def A ( self : Any , lowercase : Tuple ):
'''simple docstring'''
_snake_case = shape_list(lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_snake_case = tf.transpose(lowercase , perm=(0, 2, 3, 1) )
_snake_case = self.embedder(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase : int , lowercase : int = 2 , **lowercase : List[Any] ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='convolution' )
_snake_case = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def A ( self : Tuple , lowercase : tf.Tensor , lowercase : bool = False ):
'''simple docstring'''
return self.normalization(self.convolution(lowercase ) , training=lowercase )
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase : int , lowercase : int , **lowercase : List[Any] ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
_snake_case = [
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def A ( self : Optional[int] , lowercase : Dict ):
'''simple docstring'''
_snake_case = self.pooler(lowercase )
for layer_module in self.attention:
_snake_case = layer_module(lowercase )
_snake_case = hidden_state * pooled
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any , lowercase : RegNetConfig , lowercase : int , lowercase : int , lowercase : int = 1 , **lowercase : List[Any] ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = in_channels != out_channels or stride != 1
_snake_case = max(1 , out_channels // config.groups_width )
_snake_case = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_snake_case = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.2' ),
]
_snake_case = ACTaFN[config.hidden_act]
def A ( self : str , lowercase : str ):
'''simple docstring'''
_snake_case = hidden_state
for layer_module in self.layers:
_snake_case = layer_module(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : RegNetConfig , lowercase : int , lowercase : int , lowercase : int = 1 , **lowercase : Tuple ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = in_channels != out_channels or stride != 1
_snake_case = max(1 , out_channels // config.groups_width )
_snake_case = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
_snake_case = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ),
]
_snake_case = ACTaFN[config.hidden_act]
def A ( self : Tuple , lowercase : int ):
'''simple docstring'''
_snake_case = hidden_state
for layer_module in self.layers:
_snake_case = layer_module(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] , lowercase : RegNetConfig , lowercase : int , lowercase : int , lowercase : int = 2 , lowercase : int = 2 , **lowercase : Tuple ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
_snake_case = [
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ),
*[layer(lowercase , lowercase , lowercase , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def A ( self : Optional[Any] , lowercase : Dict ):
'''simple docstring'''
for layer_module in self.layers:
_snake_case = layer_module(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : RegNetConfig , **lowercase : str ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=f'''stages.{i+1}''' ) )
def A ( self : Tuple , lowercase : tf.Tensor , lowercase : bool = False , lowercase : bool = True ):
'''simple docstring'''
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowercase )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase )
@keras_serializable
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
_UpperCAmelCase : Tuple = RegNetConfig
def __init__( self : str , lowercase : List[str] , **lowercase : Any ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = config
_snake_case = TFRegNetEmbeddings(lowercase , name='embedder' )
_snake_case = TFRegNetEncoder(lowercase , name='encoder' )
_snake_case = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
@unpack_inputs
def A ( self : str , lowercase : tf.Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , lowercase : bool = False , ):
'''simple docstring'''
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowercase , training=lowercase )
_snake_case = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowercase )
        # Change to NCHW output format to have uniformity in the modules
_snake_case = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
_snake_case = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_snake_case = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = RegNetConfig
_UpperCAmelCase : Union[str, Any] = "regnet"
_UpperCAmelCase : Optional[Any] = "pixel_values"
@property
def A ( self : Tuple ):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
_lowerCamelCase : List[Any] = r'''
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCamelCase : Optional[Any] = r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : List[str] , lowercase : RegNetConfig , *lowercase : List[str] , **lowercase : Optional[int] ):
'''simple docstring'''
super().__init__(lowercase , *lowercase , **lowercase )
_snake_case = TFRegNetMainLayer(lowercase , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self : List[str] , lowercase : tf.Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , lowercase : Optional[Any]=False , ):
'''simple docstring'''
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.regnet(
pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : RegNetConfig , *lowercase : Optional[int] , **lowercase : Union[str, Any] ):
'''simple docstring'''
super().__init__(lowercase , *lowercase , **lowercase )
_snake_case = config.num_labels
_snake_case = TFRegNetMainLayer(lowercase , name='regnet' )
# classification head
_snake_case = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self : Any , lowercase : tf.Tensor = None , lowercase : tf.Tensor = None , lowercase : bool = None , lowercase : bool = None , lowercase : Dict=False , ):
'''simple docstring'''
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.regnet(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier[0](lowercase )
_snake_case = self.classifier[1](lowercase )
_snake_case = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states ) | 686 |
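
# The layout juggling above in two helpers: Keras Conv2D on CPU requires
# channels-last (NHWC), while the HF API exposes channels-first (NCHW), so
# inputs are transposed on the way in and outputs on the way out. A sketch:
import tensorflow as tf

def nchw_to_nhwc(x):
    return tf.transpose(x, perm=(0, 2, 3, 1))

def nhwc_to_nchw(x):
    return tf.transpose(x, perm=(0, 3, 1, 2))
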
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a_ ( ) -> Optional[Any]:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(__lowercase ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def a_ ( ) -> Optional[int]:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def a_ ( ) -> Dict:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(__lowercase ):
http_head('https://huggingface.co' ) | 686 | 1 |
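
# One way such an offline simulation can be implemented: monkeypatch
# requests.Session.request so every HTTP call fails. A sketch only; the real
# `offline` context manager in datasets covers several distinct modes:
from contextlib import contextmanager
from unittest.mock import patch

import requests

@contextmanager
def fake_offline():
    def _raise(*args, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.")

    with patch("requests.Session.request", side_effect=_raise):
        yield
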
from __future__ import annotations
def a_ ( __lowercase : int , __lowercase : int ) -> tuple[int, int]:
if b == 0:
return (1, 0)
((_snake_case) , (_snake_case)) = extended_euclid(__lowercase , a % b )
_snake_case = a // b
return (y, x - k * y)
def a_ ( __lowercase : int , __lowercase : int , __lowercase : int , __lowercase : int ) -> int:
((_snake_case) , (_snake_case)) = extended_euclid(__lowercase , __lowercase )
_snake_case = na * na
_snake_case = ra * x * na + ra * y * na
return (n % m + m) % m
def a_ ( __lowercase : int , __lowercase : int ) -> int:
((_snake_case) , (_snake_case)) = extended_euclid(__lowercase , __lowercase )
if b < 0:
_snake_case = (b % n + n) % n
return b
def a_ ( __lowercase : int , __lowercase : int , __lowercase : int , __lowercase : int ) -> int:
_snake_case , _snake_case = invert_modulo(__lowercase , __lowercase ), invert_modulo(__lowercase , __lowercase )
_snake_case = na * na
_snake_case = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True) | 686 |
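
# Numeric sanity check for the theorem above: x ≡ 1 (mod 5) and x ≡ 2 (mod 7)
# has the unique solution 16 modulo 35. A standalone sketch using Python's
# built-in modular inverse (pow with exponent -1, available since Python 3.8):
def crt_pair(n1, r1, n2, r2):
    k = ((r2 - r1) * pow(n1, -1, n2)) % n2  # choose k so that r1 + n1*k ≡ r2 (mod n2)
    return (r1 + n1 * k) % (n1 * n2)

assert crt_pair(5, 1, 7, 2) == 16
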
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCamelCase : Optional[int] = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_lowerCamelCase : List[str] = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference (or a set of references) produced by humans.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_lowerCamelCase : Dict = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def A ( self : Union[str, Any] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : int=None , lowercase : str=True , lowercase : List[str]=False ):
'''simple docstring'''
if rouge_types is None:
_snake_case = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
_snake_case = rouge_scorer.RougeScorer(rouge_types=lowercase , use_stemmer=lowercase )
if use_aggregator:
_snake_case = scoring.BootstrapAggregator()
else:
_snake_case = []
for ref, pred in zip(lowercase , lowercase ):
_snake_case = scorer.score(lowercase , lowercase )
if use_aggregator:
aggregator.add_scores(lowercase )
else:
scores.append(lowercase )
if use_aggregator:
_snake_case = aggregator.aggregate()
else:
_snake_case = {}
for key in scores[0]:
_snake_case = [score[key] for score in scores]
return result | 686 | 1 |
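
# The metric above delegates to rouge_score; the same numbers can be obtained
# by using the scorer directly (note the score(target, prediction) argument
# order):
#
#   from rouge_score import rouge_scorer
#   scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
#   scorer.score("hello there", "hello there")
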
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = Dict[str, Any]
_lowerCamelCase : List[str] = List[Prediction]
@add_end_docstrings(UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *lowercase : Tuple , **lowercase : Dict ):
'''simple docstring'''
super().__init__(*lowercase , **lowercase )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , 'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def A ( self : List[Any] , **lowercase : int ):
'''simple docstring'''
_snake_case = {}
if "threshold" in kwargs:
_snake_case = kwargs['threshold']
return {}, {}, postprocess_kwargs
def __call__( self : Optional[Any] , *lowercase : Optional[int] , **lowercase : Optional[int] ):
'''simple docstring'''
return super().__call__(*lowercase , **lowercase )
def A ( self : int , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = load_image(lowercase )
_snake_case = torch.IntTensor([[image.height, image.width]] )
_snake_case = self.image_processor(images=[image] , return_tensors='pt' )
if self.tokenizer is not None:
_snake_case = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
_snake_case = target_size
return inputs
def A ( self : Dict , lowercase : Dict ):
'''simple docstring'''
_snake_case = model_inputs.pop('target_size' )
_snake_case = self.model(**lowercase )
_snake_case = outputs.__class__({'target_size': target_size, **outputs} )
if self.tokenizer is not None:
_snake_case = model_inputs['bbox']
return model_outputs
def A ( self : List[str] , lowercase : Union[str, Any] , lowercase : Optional[Any]=0.9 ):
'''simple docstring'''
_snake_case = model_outputs['target_size']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_snake_case , _snake_case = target_size[0].tolist()
def unnormalize(lowercase : Optional[int] ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1_000),
(height * bbox[1] / 1_000),
(width * bbox[2] / 1_000),
(height * bbox[3] / 1_000),
] ) )
_snake_case , _snake_case = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_snake_case = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_snake_case = [unnormalize(lowercase ) for bbox in model_outputs['bbox'].squeeze(0 )]
_snake_case = ['score', 'label', 'box']
_snake_case = [dict(zip(lowercase , lowercase ) ) for vals in zip(scores.tolist() , lowercase , lowercase ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_snake_case = self.image_processor.post_process_object_detection(lowercase , lowercase , lowercase )
_snake_case = raw_annotations[0]
_snake_case = raw_annotation['scores']
_snake_case = raw_annotation['labels']
_snake_case = raw_annotation['boxes']
_snake_case = scores.tolist()
_snake_case = [self.model.config.idalabel[label.item()] for label in labels]
_snake_case = [self._get_bounding_box(lowercase ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_snake_case = ['score', 'label', 'box']
_snake_case = [
dict(zip(lowercase , lowercase ) )
for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
]
return annotation
def A ( self : Union[str, Any] , lowercase : "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
_snake_case , _snake_case , _snake_case , _snake_case = box.int().tolist()
_snake_case = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox | 686 |
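
# Typical downstream use of the pipeline defined above, via the high-level
# factory. The checkpoint name is illustrative only:
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("cats.png")
#   # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]
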
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = "swin2sr"
_UpperCAmelCase : Optional[int] = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[int] , lowercase : List[Any]=64 , lowercase : int=1 , lowercase : Union[str, Any]=3 , lowercase : Dict=180 , lowercase : List[Any]=[6, 6, 6, 6, 6, 6] , lowercase : Dict=[6, 6, 6, 6, 6, 6] , lowercase : List[Any]=8 , lowercase : List[str]=2.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : List[str]=False , lowercase : List[Any]=0.02 , lowercase : List[Any]=1E-5 , lowercase : Optional[int]=2 , lowercase : Tuple=1.0 , lowercase : List[Any]="1conv" , lowercase : List[Any]="pixelshuffle" , **lowercase : List[str] , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = embed_dim
_snake_case = depths
_snake_case = len(lowercase )
_snake_case = num_heads
_snake_case = window_size
_snake_case = mlp_ratio
_snake_case = qkv_bias
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = drop_path_rate
_snake_case = hidden_act
_snake_case = use_absolute_embeddings
_snake_case = layer_norm_eps
_snake_case = initializer_range
_snake_case = upscale
_snake_case = img_range
_snake_case = resi_connection
_snake_case = upsampler | 686 | 1 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *lowercase : str , **lowercase : Tuple ):
'''simple docstring'''
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.' , lowercase , )
super().__init__(*lowercase , **lowercase ) | 686 |
import random
def a_ ( __lowercase : str , __lowercase : Any , __lowercase : Any ) -> Optional[Any]:
_snake_case = a[left_index]
_snake_case = left_index + 1
for j in range(left_index + 1 , __lowercase ):
if a[j] < pivot:
_snake_case , _snake_case = a[i], a[j]
i += 1
_snake_case , _snake_case = a[i - 1], a[left_index]
return i - 1
def a_ ( __lowercase : Union[str, Any] , __lowercase : str , __lowercase : Optional[int] ) -> Tuple:
if left < right:
_snake_case = random.randint(__lowercase , right - 1 )
_snake_case , _snake_case = (
a[left],
a[pivot],
    ) # swap the chosen pivot into the left-most position
_snake_case = partition(__lowercase , __lowercase , __lowercase )
quick_sort_random(
__lowercase , __lowercase , __lowercase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
__lowercase , pivot_index + 1 , __lowercase ) # recursive quicksort to the right of the pivot point
def a_ ( ) -> str:
_snake_case = input('Enter numbers separated by a comma:\n' ).strip()
_snake_case = [int(__lowercase ) for item in user_input.split(',' )]
quick_sort_random(__lowercase , 0 , len(__lowercase ) )
print(__lowercase )
if __name__ == "__main__":
main() | 686 | 1 |
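
# Quick property check for the randomized quicksort above (called with the
# (array, left, right) signature used in its recursive calls, and reusing
# this file's `random` import): the in-place result must match sorted().
def check_quicksort(sort_fn, trials=100):
    for _ in range(trials):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
        expected = sorted(data)
        sort_fn(data, 0, len(data))
        assert data == expected
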
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Any = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
_lowerCamelCase : Tuple = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
_lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 |
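
# A stripped-down analogue of the _LazyModule idea used above: a module
# subclass that resolves attributes by importing the owning submodule on
# first access. Sketch only; the real implementation also handles __dir__,
# __reduce__, and module specs:
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)
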
import math
def a_ ( __lowercase : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__lowercase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a_ ( __lowercase : float = 0.1 ) -> int:
_snake_case = 3
_snake_case = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__lowercase )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 1 |
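# Hedged sketch of the spiral-prime counter above (this is Project Euler 58),
# with the masked names restored for readability: each layer of side j + 2
# adds the three new non-square diagonal corners j*j + k*(j + 1) for
# k = 1, 2, 3, and the loop stops once the prime ratio over the 2*j - 1
# diagonal values drops below `ratio`.
import math

def is_prime_demo(number: int) -> bool:
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

def spiral_primes(ratio: float = 0.1) -> int:
    j = 3       # current side length of the square spiral
    primes = 3  # 3, 5 and 7 are the prime diagonal values of the 3x3 spiral
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime_demo(i)
        j += 2
    return j

print(spiral_primes())  # expected to print 26241 for the default 10% ratio (takes a while)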
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def a_ ( __lowercase : List[Any] , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : Optional[int] ) -> Tuple:
_snake_case = s.rsplit(__lowercase , __lowercase )
return new.join(__lowercase )
def a_ ( __lowercase : List[Any] ) -> str:
    # encoder.embeddings are copied twice in the original FLAVA checkpoint, so they are excluded from the parameter sum
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def a_ ( __lowercase : List[str] ) -> Any:
_snake_case = {}
_snake_case = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
_snake_case = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' )
if "res_path" in key:
_snake_case = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
_snake_case = rreplace(__lowercase , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
_snake_case = rreplace(__lowercase , '.b' , '.bias' , 1 )
_snake_case = value.float()
return upgrade
@torch.no_grad()
def a_ ( __lowercase : List[str] , __lowercase : List[str] , __lowercase : Optional[Any]=None , __lowercase : str=True ) -> List[Any]:
from dall_e import Encoder
_snake_case = Encoder()
if os.path.exists(__lowercase ):
_snake_case = torch.load(__lowercase )
else:
_snake_case = torch.hub.load_state_dict_from_url(__lowercase )
if isinstance(__lowercase , __lowercase ):
_snake_case = ckpt.state_dict()
encoder.load_state_dict(__lowercase )
if config_path is not None:
_snake_case = FlavaImageCodebookConfig.from_pretrained(__lowercase )
else:
_snake_case = FlavaImageCodebookConfig()
_snake_case = FlavaImageCodebook(__lowercase ).eval()
_snake_case = encoder.state_dict()
_snake_case = upgrade_state_dict(__lowercase )
hf_model.load_state_dict(__lowercase )
_snake_case = hf_model.state_dict()
_snake_case = count_parameters(__lowercase )
_snake_case = count_parameters(__lowercase )
assert torch.allclose(__lowercase , __lowercase , atol=1E-3 )
if save_checkpoint:
hf_model.save_pretrained(__lowercase )
else:
return hf_state_dict
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 686 |
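# Hypothetical invocation of the conversion entry point above; the script
# filename and both paths are placeholders rather than real artifacts (the
# cell defines the flags --checkpoint_path, --pytorch_dump_folder_path and
# --config_path):
#
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path ./dall_e_encoder.pkl \
#       --pytorch_dump_folder_path ./flava-image-codebook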
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = "resnet"
_UpperCAmelCase : Any = ["basic", "bottleneck"]
def __init__( self : Union[str, Any] , lowercase : Dict=3 , lowercase : Any=64 , lowercase : Any=[256, 512, 1_024, 2_048] , lowercase : Dict=[3, 4, 6, 3] , lowercase : Any="bottleneck" , lowercase : Optional[Any]="relu" , lowercase : Dict=False , lowercase : str=None , lowercase : Tuple=None , **lowercase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
_snake_case = num_channels
_snake_case = embedding_size
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = layer_type
_snake_case = hidden_act
_snake_case = downsample_in_first_stage
_snake_case = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase ) + 1 )]
_snake_case , _snake_case = get_aligned_output_features_output_indices(
out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
@property
def A ( self : int ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return 1E-3 | 686 | 1 |
def a_ ( __lowercase : list ) -> list:
if any(not isinstance(__lowercase , __lowercase ) or x < 0 for x in sequence ):
raise TypeError('Sequence must be list of non-negative integers' )
for _ in range(len(__lowercase ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(__lowercase , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9] | 686 |
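# Worked trace of the "gravity" pass above, assuming the masked def `a_` is the
# `bead_sort` referenced in the asserts: on [7, 9, 4], the first pass drops 5
# beads from rod 1 onto rod 2 (9 > 4), giving [7, 4, 9]; the second pass drops
# 3 beads from rod 0 onto rod 1, giving the sorted [4, 7, 9].  len(sequence)
# passes are always enough for the beads to settle.
assert bead_sort([7, 9, 4]) == [4, 7, 9]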
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[Any] , lowercase : Union[str, Any] , lowercase : int ):
'''simple docstring'''
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(lowercase ) for s in shape] )}.npy'''
def A ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def A ( self : List[Any] , lowercase : Tuple=0 , lowercase : Optional[int]=(4, 4, 64, 64) , lowercase : Optional[int]=False ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) , dtype=lowercase )
return image
def A ( self : Tuple , lowercase : Any=False , lowercase : Union[str, Any]="CompVis/stable-diffusion-v1-4" ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = 'bf16' if fpaa else None
_snake_case , _snake_case = FlaxUNetaDConditionModel.from_pretrained(
lowercase , subfolder='unet' , dtype=lowercase , revision=lowercase )
return model, params
def A ( self : Union[str, Any] , lowercase : str=0 , lowercase : Optional[Any]=(4, 77, 768) , lowercase : int=False ):
'''simple docstring'''
_snake_case = jnp.bfloataa if fpaa else jnp.floataa
_snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) , dtype=lowercase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def A ( self : Tuple , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=lowercase )
_snake_case = self.get_latents(lowercase , fpaa=lowercase )
_snake_case = self.get_encoder_hidden_states(lowercase , fpaa=lowercase )
_snake_case = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
_snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case = jnp.array(lowercase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def A ( self : str , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : List[str] ):
'''simple docstring'''
_snake_case , _snake_case = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=lowercase )
_snake_case = self.get_latents(lowercase , shape=(4, 4, 96, 96) , fpaa=lowercase )
_snake_case = self.get_encoder_hidden_states(lowercase , shape=(4, 77, 1_024) , fpaa=lowercase )
_snake_case = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa ) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
_snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case = jnp.array(lowercase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1E-2 ) | 686 | 1 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_lowerCamelCase : str = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
_lowerCamelCase , _lowerCamelCase : List[str] = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
_lowerCamelCase : str = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
_lowerCamelCase : Tuple = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_lowerCamelCase : str = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'pip install -r transformers/examples/{example_dir}/requirements.txt'])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
# stream_logs=True) | 686 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a_ ( __lowercase : Any ) -> List[Any]:
_snake_case = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a_ ( __lowercase : Dict ) -> Tuple:
_snake_case , _snake_case = emb.weight.shape
_snake_case = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_snake_case = emb.weight.data
return lin_layer
def a_ ( __lowercase : Optional[int] , __lowercase : Union[str, Any]=None ) -> Tuple:
_snake_case = {}
for old_key in state_dict.keys():
_snake_case = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
_snake_case = key.replace('moe_layer.experts.0' , f'''ffn.experts.expert_{expert_idx}''' )
else:
_snake_case = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
if "gate" in key:
_snake_case = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
if "fc2" and "experts" not in key:
_snake_case = key.replace('.fc2.' , '.ffn.fc2.' )
if "fc1" and "experts" not in key:
_snake_case = key.replace('.fc1.' , '.ffn.fc1.' )
if ".encoder_attn." in key:
_snake_case = key.replace('.encoder_attn.' , '.cross_attention.' )
if "encoder_attn_layer_norm" in key:
_snake_case = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
if "final_layer_norm" in key:
_snake_case = key.replace('final_layer_norm' , 'ff_layer_norm' )
_snake_case = state_dict[old_key]
return new_dict
def a_ ( __lowercase : Optional[Any] , __lowercase : Tuple , __lowercase : Any , __lowercase : List[str] , __lowercase : str = WEIGHTS_NAME ) -> Union[str, Any]:
_snake_case = []
_snake_case = 0
os.makedirs(__lowercase , exist_ok=__lowercase )
for expert in range(__lowercase ):
_snake_case = switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(__lowercase ):
_snake_case = torch.load(__lowercase )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = os.path.join(
__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
torch.save(__lowercase , __lowercase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__lowercase )[0]].dtype )
# Add the last block
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
_snake_case = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = shared_weights['decoder.embed_tokens.weight']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__lowercase ) == 1:
_snake_case = os.path.join(__lowercase , __lowercase )
torch.save(__lowercase , __lowercase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__lowercase , __lowercase )
# Otherwise, let's build the index
_snake_case = {}
for idx, shard in enumerate(__lowercase ):
_snake_case = weights_name.replace('.bin' , f'''-{idx+1:05d}-of-{len(__lowercase ):05d}.bin''' )
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__lowercase , os.path.join(__lowercase , __lowercase ) )
for key in shard:
_snake_case = shard_file
# Add the metadata
_snake_case = {'total_size': total_size}
_snake_case = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(__lowercase , __lowercase ) , 'w' , encoding='utf-8' ) as f:
_snake_case = json.dumps(__lowercase , indent=2 , sort_keys=__lowercase ) + '\n'
f.write(__lowercase )
return metadata, index
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_lowerCamelCase : List[str] = parser.parse_args()
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_lowerCamelCase : Tuple = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_lowerCamelCase : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path) | 686 | 1 |
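# Standalone check of the key-renaming rules above (expert dispatch plus the
# fc1/fc2 -> ffn.* moves); the function and the sample keys are illustrative,
# not taken from a real checkpoint.
def rename_key_demo(key: str, expert_idx=None) -> str:
    if "moe_layer.experts." in key:
        if expert_idx is not None:
            key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
        else:
            key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
    if "gate" in key:
        key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
    if "fc2" in key and "experts" not in key:
        key = key.replace(".fc2.", ".ffn.fc2.")
    if "fc1" in key and "experts" not in key:
        key = key.replace(".fc1.", ".ffn.fc1.")
    return key

assert rename_key_demo("layers.3.moe_layer.experts.0.fc1.weight", expert_idx=7) == (
    "layers.3.ffn.experts.expert_7.fc1.weight"
)
assert rename_key_demo("layers.3.fc2.weight") == "layers.3.ffn.fc2.weight"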
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_lowerCamelCase : List[str] = '''
Human: <<task>>
Assistant: '''
_lowerCamelCase : Optional[int] = '''huggingface-tools/default-prompts'''
_lowerCamelCase : int = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def a_ ( __lowercase : Optional[Any] , __lowercase : str , __lowercase : Any="run" ) -> str:
if prompt_or_repo_id is None:
_snake_case = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , __lowercase ) is not None:
return prompt_or_repo_id
_snake_case = cached_file(
__lowercase , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(__lowercase , 'r' , encoding='utf-8' ) as f:
return f.read() | 686 |
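# The dispatch heuristic above in isolation: a candidate containing any
# whitespace is treated as a literal prompt and returned unchanged; anything
# else is resolved as a Hub dataset repo id.  Standalone sketch with
# illustrative names.
import re

def is_literal_prompt(candidate: str) -> bool:
    return re.search(r"\s", candidate) is not None

assert not is_literal_prompt("huggingface-tools/default-prompts")
assert is_literal_prompt("Translate this text into French: <<task>>")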
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_lowerCamelCase : List[Any] = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_lowerCamelCase : Any = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_lowerCamelCase : Union[str, Any] = '''
Compute the GLUE evaluation metric associated with each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each prediction is a label id (or a float for the \'stsb\' regression task).
    references: list of ground-truth labels, one per prediction,
        in the same format as the predictions.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def a_ ( __lowercase : List[Any] , __lowercase : Any ) -> Union[str, Any]:
return float((preds == labels).mean() )
def a_ ( __lowercase : Optional[Any] , __lowercase : List[str] ) -> Dict:
_snake_case = simple_accuracy(__lowercase , __lowercase )
_snake_case = float(fa_score(y_true=__lowercase , y_pred=__lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def a_ ( __lowercase : int , __lowercase : str ) -> str:
_snake_case = float(pearsonr(__lowercase , __lowercase )[0] )
_snake_case = float(spearmanr(__lowercase , __lowercase )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def A ( self : List[Any] , lowercase : List[str] , lowercase : Optional[Any] ):
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(lowercase , lowercase )}
elif self.config_name == "stsb":
return pearson_and_spearman(lowercase , lowercase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(lowercase , lowercase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(lowercase , lowercase )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) | 686 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : str = KandinskyVaaControlnetImgaImgPipeline
_UpperCAmelCase : Optional[int] = ["image_embeds", "negative_image_embeds", "image", "hint"]
_UpperCAmelCase : str = ["image_embeds", "negative_image_embeds", "image", "hint"]
_UpperCAmelCase : Any = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCAmelCase : str = False
@property
def A ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def A ( self : str ):
'''simple docstring'''
return 32
@property
def A ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def A ( self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def A ( self : Union[str, Any] ):
'''simple docstring'''
return 100
@property
def A ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case = {
'in_channels': 8,
            # out_channels is double in_channels because the model predicts both the mean and the variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_snake_case = UNetaDConditionModel(**lowercase )
return model
@property
def A ( self : Any ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def A ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.dummy_unet
_snake_case = self.dummy_movq
_snake_case = {
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.00085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
_snake_case = DDIMScheduler(**lowercase )
_snake_case = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def A ( self : List[Any] , lowercase : Optional[Any] , lowercase : Dict=0 ):
'''simple docstring'''
_snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase ) ).to(lowercase )
_snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowercase )
# create init_image
_snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase ) ).to(lowercase )
_snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_snake_case = Image.fromarray(np.uinta(lowercase ) ).convert('RGB' ).resize((256, 256) )
# create hint
_snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase ) ).to(lowercase )
if str(lowercase ).startswith('mps' ):
_snake_case = torch.manual_seed(lowercase )
else:
_snake_case = torch.Generator(device=lowercase ).manual_seed(lowercase )
_snake_case = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def A ( self : int ):
'''simple docstring'''
_snake_case = 'cpu'
_snake_case = self.get_dummy_components()
_snake_case = self.pipeline_class(**lowercase )
_snake_case = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = pipe(**self.get_dummy_inputs(lowercase ) )
_snake_case = output.images
_snake_case = pipe(
**self.get_dummy_inputs(lowercase ) , return_dict=lowercase , )[0]
_snake_case = image[0, -3:, -3:, -1]
_snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
_snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_snake_case = init_image.resize((512, 512) )
_snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
_snake_case = torch.from_numpy(np.array(lowercase ) ).float() / 255.0
_snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
_snake_case = 'A robot, 4k photo'
_snake_case = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(lowercase )
_snake_case = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
_snake_case = pipeline.to(lowercase )
pipeline.set_progress_bar_config(disable=lowercase )
_snake_case = torch.Generator(device='cpu' ).manual_seed(0 )
_snake_case , _snake_case = pipe_prior(
lowercase , image=lowercase , strength=0.85 , generator=lowercase , negative_prompt='' , ).to_tuple()
_snake_case = pipeline(
image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , hint=lowercase , generator=lowercase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='np' , )
_snake_case = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowercase , lowercase ) | 686 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_lowerCamelCase : Dict = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = "sequence-classification"
def __init__( self : Optional[int] , lowercase : Any ):
'''simple docstring'''
if type(lowercase ) == dict:
_snake_case = Namespace(**lowercase )
_snake_case = glue_output_modes[hparams.task]
_snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(lowercase , lowercase , self.mode )
def A ( self : Optional[Any] , **lowercase : Optional[Any] ):
'''simple docstring'''
return self.model(**lowercase )
def A ( self : Optional[Any] , lowercase : str , lowercase : Tuple ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case = outputs[0]
_snake_case = self.trainer.lr_schedulers[0]['scheduler']
_snake_case = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.hparams
_snake_case = processors[args.task]()
_snake_case = processor.get_labels()
for mode in ["train", "dev"]:
_snake_case = self._feature_file(lowercase )
if os.path.exists(lowercase ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , lowercase )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_snake_case = convert_examples_to_features(
lowercase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , lowercase )
torch.save(lowercase , lowercase )
def A ( self : Dict , lowercase : str , lowercase : int , lowercase : bool = False ):
'''simple docstring'''
_snake_case = 'dev' if mode == 'test' else mode
_snake_case = self._feature_file(lowercase )
logger.info('Loading features from cached file %s' , lowercase )
_snake_case = torch.load(lowercase )
_snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(lowercase , lowercase , lowercase , lowercase ) , batch_size=lowercase , shuffle=lowercase , )
def A ( self : str , lowercase : Optional[Any] , lowercase : str ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case , _snake_case = outputs[:2]
_snake_case = logits.detach().cpu().numpy()
_snake_case = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : int , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_snake_case = np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_snake_case = np.argmax(lowercase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_snake_case = np.squeeze(lowercase )
_snake_case = np.concatenate([x['target'] for x in outputs] , axis=0 )
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , lowercase , lowercase )}
_snake_case = dict(results.items() )
_snake_case = results
return ret, preds_list, out_label_list
def A ( self : int , lowercase : list ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : List[str] , lowercase : Any ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( lowercase : Tuple , lowercase : Any ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(lowercase , lowercase )
parser.add_argument(
'--max_seq_length' , default=128 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=lowercase , required=lowercase , help='The GLUE task to run' , )
parser.add_argument(
            '--gpus' , default=0 , type=lowercase , help='The number of GPUs allocated for this; defaults to 0, meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def a_ ( ) -> Union[str, Any]:
_snake_case = argparse.ArgumentParser()
add_generic_args(__lowercase , os.getcwd() )
_snake_case = GLUETransformer.add_model_specific_args(__lowercase , os.getcwd() )
_snake_case = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_snake_case = os.path.join(
'./results' , f'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_snake_case = GLUETransformer(__lowercase )
_snake_case = generic_train(__lowercase , __lowercase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_snake_case = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=__lowercase ) )
_snake_case = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__lowercase )
if __name__ == "__main__":
main() | 686 | 1 |
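# Hypothetical launch of the trainer above; --task, --max_seq_length and
# --gpus are defined in this cell, while the remaining flags come from the
# shared lightning_base argument helpers and are assumptions here:
#
#   python run_glue.py --task mrpc --model_name_or_path bert-base-cased \
#       --data_dir ./glue_data/MRPC --max_seq_length 128 --gpus 1 --do_predict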
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : str = "audio-spectrogram-transformer"
def __init__( self : Any , lowercase : int=768 , lowercase : str=12 , lowercase : Optional[Any]=12 , lowercase : Optional[int]=3_072 , lowercase : List[Any]="gelu" , lowercase : int=0.0 , lowercase : str=0.0 , lowercase : Optional[Any]=0.02 , lowercase : Dict=1E-12 , lowercase : List[Any]=16 , lowercase : Dict=True , lowercase : int=10 , lowercase : Optional[Any]=10 , lowercase : Union[str, Any]=1_024 , lowercase : Optional[Any]=128 , **lowercase : Dict , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = patch_size
_snake_case = qkv_bias
_snake_case = frequency_stride
_snake_case = time_stride
_snake_case = max_length
_snake_case = num_mel_bins | 686 |
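# Hedged usage sketch: in transformers this "audio-spectrogram-transformer"
# config is exposed as ASTConfig (the class name above is masked by the
# dataset); the values below mirror the defaults of the masked __init__.
from transformers import ASTConfig

config = ASTConfig(max_length=1_024, num_mel_bins=128)
assert config.model_type == "audio-spectrogram-transformer"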
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = LEDConfig
_UpperCAmelCase : int = {}
_UpperCAmelCase : List[str] = "gelu"
def __init__( self : Union[str, Any] , lowercase : Optional[int] , lowercase : Dict=13 , lowercase : Dict=7 , lowercase : Tuple=True , lowercase : Dict=False , lowercase : Dict=99 , lowercase : Any=32 , lowercase : List[Any]=2 , lowercase : List[str]=4 , lowercase : List[str]=37 , lowercase : Dict=0.1 , lowercase : int=0.1 , lowercase : List[Any]=20 , lowercase : int=2 , lowercase : Optional[Any]=1 , lowercase : List[str]=0 , lowercase : Optional[int]=4 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = eos_token_id
_snake_case = pad_token_id
_snake_case = bos_token_id
_snake_case = attention_window
        # `ModelTesterMixin.test_attention_outputs` expects attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` tokens, plus one before and one after
_snake_case = self.attention_window + 2
        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case = prepare_led_inputs_dict(lowercase , lowercase , lowercase )
_snake_case = tf.concat(
[tf.zeros_like(lowercase )[:, :-1], tf.ones_like(lowercase )[:, -1:]] , axis=-1 , )
_snake_case = global_attention_mask
return config, inputs_dict
def A ( self : str , lowercase : str , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = TFLEDModel(config=lowercase ).get_decoder()
_snake_case = inputs_dict['input_ids']
_snake_case = input_ids[:1, :]
_snake_case = inputs_dict['attention_mask'][:1, :]
_snake_case = 1
# first forward pass
_snake_case = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
_snake_case , _snake_case = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case = model(lowercase , attention_mask=lowercase )[0]
_snake_case = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case = output_from_no_past[:, -3:, random_slice_idx]
_snake_case = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : str=None , ) -> Union[str, Any]:
if attention_mask is None:
_snake_case = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_snake_case = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_snake_case = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_UpperCAmelCase : Optional[int] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase : Tuple = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : str = True
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : str = False
_UpperCAmelCase : List[Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = TFLEDModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = tf.zeros_like(inputs_dict['attention_mask'] )
_snake_case = 2
_snake_case = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
_snake_case = True
_snake_case = self.model_tester.seq_length
_snake_case = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase : List[str] ):
_snake_case = outputs.decoder_attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase : List[str] ):
_snake_case = [t.numpy() for t in outputs.encoder_attentions]
_snake_case = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
_snake_case = len(lowercase )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
if self.is_encoder_decoder:
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_decoder_attentions_output(lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase ) )
self.assertEqual(model.config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def A ( self : List[Any] ):
'''simple docstring'''
pass
def A ( self : Any ):
'''simple docstring'''
pass
def a_ ( __lowercase : str ) -> Optional[Any]:
return tf.constant(__lowercase , dtype=tf.intaa )
_lowerCamelCase : List[Any] = 1E-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 )
def A ( self : str ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 , rtol=1E-3 ) | 686 | 1 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Tuple = ["transformers", "torch", "note_seq"]
def __init__( self : List[Any] , *lowercase : List[Any] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] ) | 686 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
_lowerCamelCase : Union[str, Any] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_lowerCamelCase : Union[str, Any] = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
_lowerCamelCase : Optional[int] = '''zero2'''
_lowerCamelCase : List[Any] = '''zero3'''
_lowerCamelCase : Dict = [ZEROa, ZEROa]
def a_ ( __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Tuple ) -> Dict:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
_snake_case = parameterized.to_safe_name('_'.join(str(__lowercase ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
_lowerCamelCase : Dict = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : List[str] , lowercase : List[Any] , lowercase : Dict ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : Any , lowercase : str , lowercase : List[str] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : List[str] , lowercase : Optional[Any] , lowercase : Optional[int] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
def A ( self : List[str] , lowercase : Optional[Any] ):
'''simple docstring'''
pass
def A ( self : str , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : bool = True , lowercase : bool = True , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = models[model]
_snake_case = self.run_trainer(
stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
self.do_checks(lowercase )
return output_dir
def A ( self : Any , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : int = 1 , lowercase : bool = True , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
_snake_case = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_snake_case = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_snake_case = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_snake_case = self.get_launcher(lowercase )
_snake_case = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase , env=self.get_env() )
return output_dir
def A ( self : List[str] , lowercase : Any=False ):
'''simple docstring'''
_snake_case = min(2 , get_gpu_count() ) if distributed else 1
        return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
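# For reference, the full command assembled by the run_trainer helper looks
# roughly like the following (illustrative paths and values, added here):
#
#   deepspeed --num_nodes 1 --num_gpus 2 <examples>/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random \
#       --output_dir <tmp> --num_train_epochs 1 --evaluation_strategy steps \
#       --fp16 --deepspeed <test_dir>/ds_config_wav2vec2_zero2.json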
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch( gpta_checkpoint_path : str , gpta_config_file : str , pytorch_dump_folder_path : str ) -> None:
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
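# Example invocation (illustrative, added; the script name is whatever this
# file is saved as, e.g. convert_gpt2_checkpoint.py):
#
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path ./models/gpt2/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch \
#       --gpt2_config_file ./models/gpt2/config.json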
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ernie'''] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
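# How this lazy-module pattern behaves (illustrative note, added): nothing
# under modeling_ernie is imported at package-import time; the _LazyModule
# placed in sys.modules resolves attributes on first access, e.g.
#
#   import transformers.models.ernie as ernie
#   ernie.ErnieModel  # only now does modeling_ernie actually get imported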
def a_ ( string_a : str , string_b : str ) -> int:
    if len(string_a ) != len(string_b ):
        raise ValueError('String lengths must match!' )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
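# Illustrative examples (added; the original file ships no doctests, so the
# doctest.testmod() call below is effectively a no-op):
#
#   a_('karolin', 'kathrin')  # -> 3
#   a_('0000', '1111')        # -> 4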
if __name__ == "__main__":
import doctest
    doctest.testmod()
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big( n : int , prec : int=1_000 ) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
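# Illustrative behaviour (added): is_prime_big(97) -> True and
# is_prime_big(100) -> False. Miller-Rabin never rejects a true prime; `prec`
# only bounds the error probability for composite inputs.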
if __name__ == "__main__":
    n = abs(int(input('''Enter bound : ''').strip()))
    print('''Here\'s the list of primes:''')
    print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
from math import sqrt
def is_prime( number : int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are not primes.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must been from type bool"
    return status
def a_ ( __lowercase : int ) -> Dict:
assert isinstance(__lowercase , __lowercase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
_snake_case = list(range(2 , n + 1 ) )
_snake_case = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(__lowercase ) ):
for j in range(i + 1 , len(__lowercase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
_snake_case = 0
# filters actual prime numbers.
_snake_case = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__lowercase , __lowercase ), "'ans' must been from type list"
return ans
def get_prime_numbers( n : int ) -> Optional[Any]:
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , n + 1 ):
        if is_prime(number ):
            ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def prime_factorization( number : Optional[int] ) -> Any:
    assert isinstance(number , int ) and number >= 0, "'number' must been an int and >= 0"
    ans = [] # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def a_ ( __lowercase : Optional[Any] ) -> Any:
assert isinstance(__lowercase , __lowercase ) and (
number >= 0
    ), "'number' must been an int and >= 0"
_snake_case = 0
# prime factorization of 'number'
_snake_case = prime_factorization(__lowercase )
_snake_case = max(__lowercase )
# precondition
assert isinstance(__lowercase , __lowercase ), "'ans' must been from type int"
return ans
def a_ ( __lowercase : Optional[int] ) -> Dict:
assert isinstance(__lowercase , __lowercase ) and (
number >= 0
    ), "'number' must been an int and >= 0"
_snake_case = 0
# prime factorization of 'number'
_snake_case = prime_factorization(__lowercase )
_snake_case = min(__lowercase )
# precondition
assert isinstance(__lowercase , __lowercase ), "'ans' must been from type int"
return ans
def is_even( number : Union[str, Any] ) -> Union[str, Any]:
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , bool ), "compare must been from type bool"
    return number % 2 == 0
def is_odd( number : Optional[int] ) -> Optional[int]:
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , bool ), "compare must been from type bool"
    return number % 2 != 0
def a_ ( __lowercase : Union[str, Any] ) -> Dict:
assert (
isinstance(__lowercase , __lowercase ) and (number > 2) and is_even(__lowercase )
), "'number' must been an int, even and > 2"
_snake_case = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
_snake_case = get_prime_numbers(__lowercase )
_snake_case = len(__lowercase )
# run variable for while-loops.
_snake_case = 0
_snake_case = None
# exit variable. for break up the loops
_snake_case = True
while i < len_pn and loop:
_snake_case = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
_snake_case = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__lowercase , __lowercase )
and (len(__lowercase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def gcd( number1 : int , number2 : int ) -> int:
    assert (
        isinstance(number1 , int )
        and isinstance(number2 , int )
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1 , int ) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def a_ ( __lowercase : str , __lowercase : int ) -> Union[str, Any]:
assert (
isinstance(__lowercase , __lowercase )
and isinstance(__lowercase , __lowercase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
_snake_case = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
_snake_case = prime_factorization(__lowercase )
_snake_case = prime_factorization(__lowercase )
elif numbera == 1 or numbera == 1:
_snake_case = []
_snake_case = []
_snake_case = max(__lowercase , __lowercase )
_snake_case = 0
_snake_case = 0
_snake_case = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
_snake_case = prime_fac_a.count(__lowercase )
_snake_case = prime_fac_a.count(__lowercase )
for _ in range(max(__lowercase , __lowercase ) ):
ans *= n
else:
_snake_case = prime_fac_a.count(__lowercase )
for _ in range(__lowercase ):
ans *= n
done.append(__lowercase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
_snake_case = prime_fac_a.count(__lowercase )
for _ in range(__lowercase ):
ans *= n
done.append(__lowercase )
# precondition
assert isinstance(__lowercase , __lowercase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def a_ ( __lowercase : List[str] ) -> str:
assert isinstance(__lowercase , __lowercase ) and (n >= 0), "'number' must been a positive int"
_snake_case = 0
_snake_case = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(__lowercase ):
ans += 1
# precondition
assert isinstance(__lowercase , __lowercase ) and is_prime(
__lowercase ), "'ans' must been a prime number and from type int"
return ans
def a_ ( __lowercase : str , __lowercase : Tuple ) -> Union[str, Any]:
assert (
is_prime(__lowercase ) and is_prime(__lowercase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
_snake_case = p_number_a + 1 # jump to the next number
_snake_case = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__lowercase ):
number += 1
while number < p_number_a:
ans.append(__lowercase )
number += 1
# fetch the next prime number.
while not is_prime(__lowercase ):
number += 1
# precondition
assert (
isinstance(__lowercase , __lowercase )
and ans[0] != p_number_a
and ans[len(__lowercase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def get_divisors( n : int ) -> list:
    assert isinstance(n , int ) and (n >= 1), "'n' must been int and >= 1"
    ans = [] # will be returned.
    for divisor in range(1 , n + 1 ):
        if n % divisor == 0:
            ans.append(divisor )
    # precondition
    assert ans[0] == 1 and ans[len(ans ) - 1] == n, "Error in function getDivisors(...)"
    return ans
def a_ ( __lowercase : Tuple ) -> Tuple:
assert isinstance(__lowercase , __lowercase ) and (
number > 1
), "'number' must been an int and >= 1"
_snake_case = get_divisors(__lowercase )
# precondition
assert (
isinstance(__lowercase , __lowercase )
and (divisors[0] == 1)
and (divisors[len(__lowercase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def a_ ( __lowercase : Optional[Any] , __lowercase : List[Any] ) -> Any:
assert (
isinstance(__lowercase , __lowercase )
and isinstance(__lowercase , __lowercase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
_snake_case = gcd(abs(__lowercase ) , abs(__lowercase ) )
# precondition
assert (
isinstance(__lowercase , __lowercase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a_ ( __lowercase : Tuple ) -> str:
assert isinstance(__lowercase , __lowercase ) and (n >= 0), "'n' must been a int and >= 0"
_snake_case = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a_ ( __lowercase : Dict ) -> Tuple:
assert isinstance(__lowercase , __lowercase ) and (n >= 0), "'n' must been an int and >= 0"
_snake_case = 0
_snake_case = 1
_snake_case = 1 # this will be return
for _ in range(n - 1 ):
_snake_case = ans
ans += fiba
_snake_case = tmp
    return ans
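# Illustrative usage of the named helpers above (added):
#
#   is_prime(97)              # -> True
#   prime_factorization(287)  # -> [7, 41]
#   gcd(3516, 3628)           # -> 4
#   get_divisors(28)          # -> [1, 2, 4, 7, 14, 28]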
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'''\s+''')
def get_hash( example : List[Any] ) -> Dict:
    return {"hash": hashlib.md5(re.sub(PATTERN , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def line_stats( example : List[Any] ) -> Dict:
    line_lengths = [len(line ) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats( example : Optional[int] ) -> List[str]:
    alpha_frac = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def check_uniques( example : List[Any] , uniques : Optional[Any] ) -> bool:
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def is_autogenerated( example : Union[str, Any] , scan_width : int=5 ) -> Optional[Any]:
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def is_config_or_test( example : List[Any] , scan_width : int=5 , coeff : float=0.0_5 ) -> Union[str, Any]:
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n' )
    threshold = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords( example : Union[str, Any] ) -> Any:
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments( example : Tuple , minimum : int=4 ) -> List[str]:
    lines = example['content'].splitlines()
    counter = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio( example : Dict ) -> Dict:
    input_ids = tokenizer(example['content'] , truncation=False )['input_ids']
    ratio = len(example['content'] ) / len(input_ids )
return {"ratio": ratio}
def preprocess( example : Optional[Any] ) -> Any:
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
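# Shape of the per-example record produced by preprocess() (illustrative
# values, added):
#
#   {"hash": "5d41...", "line_mean": 12.3, "line_max": 87, "alpha_frac": 0.62,
#    "ratio": 3.1, "autogenerated": False, "config_or_test": False,
#    "has_no_keywords": False, "has_few_assignments": True}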
def filter( example : Optional[int] , uniques : str , args : List[Any] ) -> bool:
    if not check_uniques(example , uniques ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file( file_path : Dict ) -> None:
    with open(file_path , 'rb' ) as f_in:
        with gzip.open(str(file_path ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
    print(F'Size of deduplicated dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F'file-{file_number+1:012}.json')
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}')
def match_pattern( input_string : str , pattern : str ) -> bool:
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since string of zero length will match with a pattern in which every
    # character is followed by a '*'
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
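# Illustrative examples (added): match_pattern('aab', 'c*a*b') -> True, since
# 'c*' can match zero characters; match_pattern('aaa', 'aa') -> False, because
# the whole input string must be consumed.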
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = '''aab'''
    pattern = '''c*a*b'''
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'{input_string} matches the given pattern {pattern}')
else:
        print(F'{input_string} does not match with the given pattern {pattern}')
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : int = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = "yolos"
def __init__( self : int , lowercase : List[str]=768 , lowercase : Tuple=12 , lowercase : int=12 , lowercase : int=3_072 , lowercase : Optional[int]="gelu" , lowercase : str=0.0 , lowercase : Optional[int]=0.0 , lowercase : Optional[Any]=0.02 , lowercase : List[str]=1E-12 , lowercase : Dict=[512, 864] , lowercase : Union[str, Any]=16 , lowercase : List[Any]=3 , lowercase : List[str]=True , lowercase : Optional[int]=100 , lowercase : int=True , lowercase : Dict=False , lowercase : str=1 , lowercase : int=5 , lowercase : Tuple=2 , lowercase : List[str]=5 , lowercase : Any=2 , lowercase : List[str]=0.1 , **lowercase : int , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = qkv_bias
_snake_case = num_detection_tokens
_snake_case = use_mid_position_embeddings
_snake_case = auxiliary_loss
# Hungarian matcher
_snake_case = class_cost
_snake_case = bbox_cost
_snake_case = giou_cost
# Loss coefficients
_snake_case = bbox_loss_coefficient
_snake_case = giou_loss_coefficient
_snake_case = eos_coefficient
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
@property
def A ( self : str ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Any ):
'''simple docstring'''
return 1E-4
@property
def A ( self : List[Any] ):
'''simple docstring'''
        return 12
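# Minimal usage sketch (illustrative, added; in upstream transformers these
# classes are named YolosConfig and YolosOnnxConfig):
#
#   config = YolosConfig()            # defaults as defined above
#   onnx_config = YolosOnnxConfig(config)
#   onnx_config.inputs                # {"pixel_values": {0: "batch", 1: "num_channels", ...}}
#   onnx_config.atol_for_validation   # 1e-4
#   onnx_config.default_onnx_opset    # 12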
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ["image_processor", "tokenizer"]
_UpperCAmelCase : Dict = "AutoImageProcessor"
_UpperCAmelCase : Dict = "AutoTokenizer"
def __init__( self : List[str] , lowercase : Dict=None , lowercase : str=None , **lowercase : Dict ):
'''simple docstring'''
_snake_case = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowercase , )
_snake_case = kwargs.pop('feature_extractor' )
_snake_case = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowercase , lowercase )
_snake_case = self.image_processor
_snake_case = False
def __call__( self : List[str] , *lowercase : str , **lowercase : List[Any] ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowercase , **lowercase )
_snake_case = kwargs.pop('images' , lowercase )
_snake_case = kwargs.pop('text' , lowercase )
if len(lowercase ) > 0:
_snake_case = args[0]
_snake_case = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
_snake_case = self.image_processor(lowercase , *lowercase , **lowercase )
if text is not None:
_snake_case = self.tokenizer(lowercase , **lowercase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_snake_case = encodings['input_ids']
return inputs
def A ( self : Optional[Any] , *lowercase : List[Any] , **lowercase : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def A ( self : Any , *lowercase : str , **lowercase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*lowercase , **lowercase )
@contextmanager
def A ( self : str ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
_snake_case = True
_snake_case = self.tokenizer
yield
_snake_case = self.image_processor
_snake_case = False
def A ( self : Any , lowercase : List[str] , lowercase : Any=False , lowercase : List[str]=None ):
'''simple docstring'''
if added_vocab is None:
_snake_case = self.tokenizer.get_added_vocab()
_snake_case = {}
while tokens:
_snake_case = re.search(R'<s_(.*?)>' , lowercase , re.IGNORECASE )
if start_token is None:
break
_snake_case = start_token.group(1 )
_snake_case = re.search(Rf'''</s_{key}>''' , lowercase , re.IGNORECASE )
_snake_case = start_token.group()
if end_token is None:
_snake_case = tokens.replace(lowercase , '' )
else:
_snake_case = end_token.group()
_snake_case = re.escape(lowercase )
_snake_case = re.escape(lowercase )
_snake_case = re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''' , lowercase , re.IGNORECASE )
if content is not None:
_snake_case = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_snake_case = self.tokenajson(lowercase , is_inner_value=lowercase , added_vocab=lowercase )
if value:
if len(lowercase ) == 1:
_snake_case = value[0]
_snake_case = value
else: # leaf nodes
_snake_case = []
for leaf in content.split(R'<sep/>' ):
_snake_case = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_snake_case = leaf[1:-2] # for categorical special tokens
output[key].append(lowercase )
if len(output[key] ) == 1:
_snake_case = output[key][0]
_snake_case = tokens[tokens.find(lowercase ) + len(lowercase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=lowercase , added_vocab=lowercase )
if len(lowercase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def A ( self : int ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase , )
return self.image_processor_class
@property
def A ( self : Optional[int] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase , )
        return self.image_processor
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_lowerCamelCase : Tuple = logging.get_logger(__name__)
# General docstring
_lowerCamelCase : Union[str, Any] = '''ResNetConfig'''
# Base docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[Any] = [1, 2_048, 7, 7]
# Image classification docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[int] = '''tiger cat'''
_lowerCamelCase : str = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 3 , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(
lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=kernel_size // 2 , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def A ( self : Union[str, Any] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def A ( self : Tuple , lowercase : Tensor ):
'''simple docstring'''
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_snake_case = self.embedder(lowercase )
_snake_case = self.pooler(lowercase )
return embedding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase : int , lowercase : int , lowercase : int = 2 ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(lowercase , lowercase , kernel_size=1 , stride=lowercase , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : List[str] , lowercase : List[str] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" , lowercase : int = 4 ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , kernel_size=1 ) , ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , kernel_size=1 , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : Dict , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowercase : ResNetConfig , lowercase : int , lowercase : int , lowercase : int = 2 , lowercase : int = 2 , ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , stride=lowercase , activation=config.hidden_act ) , *[layer(lowercase , lowercase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase , config.depths[1:] ):
self.stages.append(ResNetStage(lowercase , lowercase , lowercase , depth=lowercase ) )
def A ( self : str , lowercase : Tensor , lowercase : bool = False , lowercase : bool = True ):
'''simple docstring'''
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowercase )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowercase , hidden_states=lowercase , )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ResNetConfig
_UpperCAmelCase : Tuple = "resnet"
_UpperCAmelCase : Optional[Any] = "pixel_values"
_UpperCAmelCase : Dict = True
def A ( self : List[str] , lowercase : Dict ):
'''simple docstring'''
if isinstance(lowercase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowercase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def A ( self : Tuple , lowercase : List[Any] , lowercase : Optional[Any]=False ):
'''simple docstring'''
if isinstance(lowercase , lowercase ):
_snake_case = value
_lowerCamelCase : str = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCamelCase : int = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : Any ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
_snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self : Union[str, Any] , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowercase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase : int ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowercase )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self : Union[str, Any] , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[torch.LongTensor] = None , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowercase )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowercase , lowercase )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowercase , lowercase )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowercase : Union[str, Any] ):
'''simple docstring'''
super().__init__(lowercase )
super()._init_backbone(lowercase )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@replace_return_docstrings(output_type=lowercase , config_class=_CONFIG_FOR_DOC )
def A ( self : Dict , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
            feature_maps=lowercase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowercase , )
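# Usage sketch (illustrative, added; the upstream names are ResNetConfig and
# ResNetForImageClassification):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained('microsoft/resnet-50')
#   inputs = processor(images=image, return_tensors='pt')   # `image` is a PIL image
#   logits = model(**inputs).logits                          # shape (batch, num_labels)
#   label = model.config.id2label[logits.argmax(-1).item()]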
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = BioGptTokenizer
_UpperCAmelCase : List[str] = False
def A ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
_snake_case = dict(zip(lowercase , range(len(lowercase ) ) ) )
_snake_case = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(lowercase ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(lowercase ) )
def A ( self : str , lowercase : Any ):
'''simple docstring'''
_snake_case = 'lower newer'
_snake_case = 'lower newer'
return input_text, output_text
def A ( self : int ):
'''simple docstring'''
_snake_case = BioGptTokenizer(self.vocab_file , self.merges_file )
_snake_case = 'lower'
_snake_case = ['low', 'er</w>']
_snake_case = tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
_snake_case = tokens + ['<unk>']
_snake_case = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
@slow
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
_snake_case = tokenizer.encode('sequence builders' , add_special_tokens=lowercase )
_snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowercase )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import math
def is_prime( number : int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime( value : int , factor : int=1 , **kwargs ) -> int:
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
        if value == first_value_val:
            return next_prime(value + 1 , **kwargs )
    return value
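# Illustrative behaviour (added): next_prime(14) counts upward and returns 17,
# while next_prime(14, desc=True) counts downward and returns 13.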
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def a_ ( model_name : Union[str, Any] ) -> List[Any]:
    backbone_config = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def a_ ( config : DetaConfig ) -> list:
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key( dct : dict , old_key : str , new_key : str ) -> None:
    val = dct.pop(old_key )
    dct[new_key] = val
def a_ ( __lowercase : List[str] , __lowercase : str ) -> Dict:
_snake_case = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_snake_case = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
_snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[:dim, :]
_snake_case = in_proj_bias[: dim]
_snake_case = in_proj_weight[
dim : dim * 2, :
]
_snake_case = in_proj_bias[
dim : dim * 2
]
_snake_case = in_proj_weight[
-dim :, :
]
_snake_case = in_proj_bias[-dim :]
# fmt: on
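def _split_fused_qkv_sketch(in_proj_weight , in_proj_bias , dim ):
    # Illustrative, unused helper mirroring the slicing above: the original Swin
    # checkpoints store query/key/value as one fused (3 * dim, dim) matrix and a
    # (3 * dim,) bias, and the three equal blocks are recovered by slicing along
    # dimension 0. Name and signature here are made up for illustration only.
    query = (in_proj_weight[:dim, :], in_proj_bias[:dim])
    key = (in_proj_weight[dim : dim * 2, :], in_proj_bias[dim : dim * 2])
    value = (in_proj_weight[-dim:, :], in_proj_bias[-dim:])
    return query, key, value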
def a_ ( __lowercase : Dict , __lowercase : Dict ) -> str:
# transformer decoder self-attention layers
_snake_case = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[:hidden_size, :]
_snake_case = in_proj_bias[:hidden_size]
_snake_case = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_snake_case = in_proj_bias[hidden_size : hidden_size * 2]
_snake_case = in_proj_weight[-hidden_size:, :]
_snake_case = in_proj_bias[-hidden_size:]
def a_ ( ) -> List[str]:
_snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_snake_case = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def a_ ( __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Tuple ) -> Optional[Any]:
_snake_case = get_deta_config(__lowercase )
# load original state dict
if model_name == "deta-swin-large":
_snake_case = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
_snake_case = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
_snake_case = torch.load(__lowercase , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(__lowercase , param.shape )
# rename keys
_snake_case = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_swin_q_k_v(__lowercase , config.backbone_config )
read_in_decoder_q_k_v(__lowercase , __lowercase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
if "input_proj" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
# finally, create HuggingFace model and load state dict
_snake_case = DetaForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
_snake_case = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(__lowercase )
# load image processor
_snake_case = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
_snake_case = prepare_img()
_snake_case = processor(images=__lowercase , return_tensors='pt' )
_snake_case = encoding['pixel_values']
_snake_case = model(pixel_values.to(__lowercase ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
        _snake_case = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
        _snake_case = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
        _snake_case = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
        _snake_case = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__lowercase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__lowercase ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'''jozhang97/{model_name}''' )
processor.push_to_hub(f'''jozhang97/{model_name}''' )
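# Example invocation (illustrative only; the actual script filename may differ):
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub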
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 686 | 1 |
def a_ ( __lowercase : list ) -> list:
    is_sorted = False
    while is_sorted is False: # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(__lowercase ) - 1 , 2 ): # iterating over all even indices
            if __lowercase[i] > __lowercase[i + 1]:
                __lowercase[i], __lowercase[i + 1] = __lowercase[i + 1], __lowercase[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(__lowercase ) - 1 , 2 ): # iterating over all odd indices
            if __lowercase[i] > __lowercase[i + 1]:
                __lowercase[i], __lowercase[i + 1] = __lowercase[i + 1], __lowercase[i]
                # swapping if elements not in order
                is_sorted = False
    return __lowercase
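# Minimal sanity check for the brick sort above; a sketch added for
# illustration, assuming a_ sorts in place and returns the list.
def _demo_brick_sort() -> None:
    assert a_([5, 3, 1, 4, 2] ) == [1, 2, 3, 4, 5]
    assert a_([] ) == []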
if __name__ == "__main__":
    print('''Enter list to be sorted''')
    input_list = [int(x) for x in input().split()]
    # reading the elements of the list in one line
    sorted_list = a_(input_list)
    print('''The sorted list is''')
    print(sorted_list) | 686 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_lowerCamelCase : Dict = '''pt'''
elif is_tf_available():
_lowerCamelCase : List[str] = '''tf'''
else:
_lowerCamelCase : List[Any] = '''jax'''
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = PerceiverTokenizer
_UpperCAmelCase : Optional[int] = False
def A ( self : Tuple ):
'''simple docstring'''
super().setUp()
_snake_case = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A ( self : str ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def A ( self : Optional[int] , **lowercase : Dict ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def A ( self : Optional[int] , lowercase : Tuple , lowercase : Optional[Any]=False , lowercase : int=20 , lowercase : Optional[int]=5 ):
'''simple docstring'''
_snake_case = []
for i in range(len(lowercase ) ):
try:
_snake_case = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
        _snake_case = list(filter(lambda t : re.match(R'^[ a-zA-Z]+$' , t[1] ) , toks ) )
        _snake_case = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
if max_length is not None and len(lowercase ) > max_length:
_snake_case = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
_snake_case = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case = [t[0] for t in toks]
# Ensure consistency
_snake_case = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
_snake_case = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
_snake_case = ' ' + output_txt
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = 'Unicode €.'
_snake_case = tokenizer(lowercase )
_snake_case = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]Unicode €.[SEP]' )
_snake_case = tokenizer('e è é ê ë' )
_snake_case = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
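        # The ids above are raw UTF-8 bytes shifted by the tokenizer's special
        # token offset of 6, which is why the multi-byte character "€"
        # (0xE2 0x82 0xAC) occupies three ids (232, 136, 178) while each ASCII
        # character maps to a single id.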
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_snake_case = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
self.assertIsInstance(lowercase , lowercase )
if FRAMEWORK != "jax":
_snake_case = list(batch.input_ids.numpy()[0] )
else:
_snake_case = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowercase )
self.assertIn('attention_mask' , lowercase )
self.assertNotIn('decoder_input_ids' , lowercase )
self.assertNotIn('decoder_attention_mask' , lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = [
'Summary of the text.',
'Another summary.',
]
_snake_case = tokenizer(
text_target=lowercase , max_length=32 , padding='max_length' , truncation=lowercase , return_tensors=lowercase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
_snake_case = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_snake_case = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_snake_case = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
_snake_case = [f'''<extra_id_{i}>''' for i in range(125 )]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case = tokenizer_class.from_pretrained(
lowercase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowercase )]
_snake_case = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : List[str] ):
'''simple docstring'''
pass
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : int ):
'''simple docstring'''
_snake_case = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
_snake_case = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(lowercase , lowercase ) | 686 | 1 |
def combination_sum_iv( n : int , array : list[int] , target : int ) -> int:
    def count_of_possible_combinations(target : int ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array( n : int , array : list[int] , target : int ) -> int:
    def count_of_possible_combinations_with_dp_array(
        target : int , dp_array : list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up( n : int , array : list[int] , target : int ) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
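# Worked example for the bottom-up table above (array = [1, 2, 5], target = 5):
# ordered sequences are counted, so 1+1+1+2 and 2+1+1+1 count separately, the
# table builds dp = [1, 1, 2, 3, 5, 9], and the answer is 9.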
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target)) | 686 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def a_ ( ) -> None:
    num_nodes, num_edges = 9, 14 # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost] )
        adjacency[nodeb].append([nodea, cost] )
    result = mst(adjacency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
        assert edge in result or reverse in result | 686 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_mobilenet_v2'''] = ['''MobileNetV2FeatureExtractor''']
    _import_structure['''image_processing_mobilenet_v2'''] = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mobilenet_v2'''] = [
        '''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MobileNetV2ForImageClassification''',
        '''MobileNetV2ForSemanticSegmentation''',
        '''MobileNetV2Model''',
        '''MobileNetV2PreTrainedModel''',
        '''load_tf_weights_in_mobilenet_v2''',
    ]
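# Note: the guarded blocks above only register module paths and symbol names;
# _LazyModule at the bottom defers the real imports until an attribute is first
# accessed, so importing this package stays cheap even when torch and vision
# dependencies are installed.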
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Tuple = ["transformers", "torch", "note_seq"]
def __init__( self : List[Any] , *lowercase : List[Any] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] ) | 686 | 1 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
_lowerCamelCase : Union[str, Any] = 2_048
_lowerCamelCase : Union[str, Any] = 4_096
_lowerCamelCase : str = 42
_lowerCamelCase : List[Any] = os.environ.pop('''PROCESS_TRAIN''', '''false''')
_lowerCamelCase : Tuple = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4}
def a_ ( __lowercase : int ) -> int:
    def choose_first(answer , is_long_answer=False ):
        assert isinstance(answer , list )
        if len(answer ) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a['start_token'] ) > 0:
                break
        return a
_snake_case = {'id': example['id']}
_snake_case = example['annotations']
_snake_case = annotation['yes_no_answer']
if 0 in yes_no_answer or 1 in yes_no_answer:
_snake_case = ['yes'] if 1 in yes_no_answer else ['no']
_snake_case = _snake_case = []
_snake_case = _snake_case = []
_snake_case = ['<cls>']
else:
_snake_case = ['short']
_snake_case = choose_first(annotation['short_answers'] )
if len(out['start_token'] ) == 0:
# answer will be long if short is not available
_snake_case = ['long']
_snake_case = choose_first(annotation['long_answer'] , is_long_answer=__lowercase )
_snake_case = []
answer.update(__lowercase )
# disregard some samples
if len(answer['start_token'] ) > 1 or answer["start_token"] == answer["end_token"]:
_snake_case = True
else:
_snake_case = False
_snake_case = ['start_token', 'end_token', 'start_byte', 'end_byte', 'text']
if not all(isinstance(answer[k] , __lowercase ) for k in cols ):
raise ValueError('Issue in ID' , example['id'] )
return answer
def a_ ( __lowercase : Optional[int] , __lowercase : Tuple=False ) -> Union[str, Any]:
_snake_case = _get_single_answer(__lowercase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
_snake_case = example['document']['tokens']
_snake_case = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
return {
"context": " ".join(__lowercase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
_snake_case = ['start_token', 'end_token']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
_snake_case = example['document']['tokens']
_snake_case = answer['start_token']
_snake_case = answer['end_token']
_snake_case = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
_snake_case = ' '.join(context[start_token:end_token] )
# checking above code
if assertion:
_snake_case = doc['is_html'][answer['start_token'] : answer['end_token']]
_snake_case = doc['token'][answer['start_token'] : answer['end_token']]
_snake_case = ' '.join([old[i] for i in range(len(__lowercase ) ) if not is_html[i]] )
if new != old:
print('ID:' , example['id'] )
print('New:' , __lowercase , end='\n' )
print('Old:' , __lowercase , end='\n\n' )
return {
"context": " ".join(__lowercase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def a_ ( __lowercase : Optional[Any] , __lowercase : List[Any] , __lowercase : List[str]=2_048 , __lowercase : int=4_096 , __lowercase : Optional[Any]=True ) -> Dict:
# overlap will be of doc_stride - q_len
_snake_case = get_context_and_ans(__lowercase , assertion=__lowercase )
_snake_case = out['answer']
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
_snake_case = tokenizer(example['question']['text'] , out['context'] ).input_ids
_snake_case = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
_snake_case = []
_snake_case = []
_snake_case = input_ids[:q_len]
_snake_case = range(__lowercase , len(__lowercase ) , max_length - doc_stride )
for i in doc_start_indices:
_snake_case = i + max_length - q_len
_snake_case = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['category'][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(__lowercase ),
"end_token": [-100] * len(__lowercase ),
"category": category,
},
}
_snake_case = out['context'].split()
_snake_case = splitted_context[answer['end_token']]
_snake_case = len(
tokenizer(
' '.join(splitted_context[: answer['start_token']] ) , add_special_tokens=__lowercase , ).input_ids )
_snake_case = len(
tokenizer(' '.join(splitted_context[: answer['end_token']] ) , add_special_tokens=__lowercase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
_snake_case = len(tokenizer(__lowercase , add_special_tokens=__lowercase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
_snake_case = input_ids[answer['start_token'] : answer['end_token'] + 1] # right & left are inclusive
_snake_case = answer['start_token']
_snake_case = answer['end_token']
if assertion:
_snake_case = tokenizer.decode(__lowercase )
if answer["span"] != new:
print('ISSUE IN TOKENIZATION' )
print('OLD:' , answer['span'] )
print('NEW:' , __lowercase , end='\n\n' )
if len(__lowercase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
_snake_case = input_ids[:q_len]
_snake_case = range(__lowercase , len(__lowercase ) , max_length - doc_stride )
_snake_case = []
_snake_case = []
_snake_case = []
_snake_case = [] # null, yes, no, long, short
for i in doc_start_indices:
_snake_case = i + max_length - q_len
_snake_case = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
_snake_case = start_token - i + q_len
_snake_case = end_token - i + q_len
answers_category.append(answer['category'][0] ) # ["short"] -> "short"
else:
_snake_case = -100
_snake_case = -100
answers_category.append('null' )
_snake_case = inputs[-1][start_token : end_token + 1]
answers_start_token.append(__lowercase )
answers_end_token.append(__lowercase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('ISSUE in strided for ID:' , example['id'] )
print('New:' , tokenizer.decode(__lowercase ) )
print('Old:' , tokenizer.decode(__lowercase ) , end='\n\n' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
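def _stride_windows_sketch(num_context_tokens: int = 20 , q_len: int = 3 , max_length: int = 8 , doc_stride: int = 4 ):
    # Illustrative, unused sketch of the windowing above: every window keeps the
    # q_len question tokens plus max_length - q_len context tokens, and window
    # starts advance by max_length - doc_stride, so consecutive windows share
    # doc_stride - q_len context tokens. All names here are made up for
    # illustration and are not part of the preprocessing itself.
    context = list(range(num_context_tokens ) )
    starts = range(q_len , len(context ) , max_length - doc_stride )
    return [context[i : i + max_length - q_len] for i in starts]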
def a_ ( __lowercase : int , __lowercase : str , __lowercase : Dict=2_048 , __lowercase : List[str]=4_096 , __lowercase : List[str]=False ) -> int:
_snake_case = get_strided_contexts_and_ans(
__lowercase , __lowercase , doc_stride=__lowercase , max_length=__lowercase , assertion=__lowercase , )
return example
def a_ ( __lowercase : Any , __lowercase : List[Any] ) -> Dict:
with jsonlines.open(__lowercase , 'a' ) as writer:
for example in tqdm(__lowercase , total=len(__lowercase ) , desc='Saving samples ... ' ):
_snake_case = example['labels']
for ids, start, end, cat in zip(
example['input_ids'] , labels['start_token'] , labels['end_token'] , labels['category'] , ):
                if start == -1 and end == -1:
                    continue # skip samples that have no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue # randomly drop roughly 60% of the "null" samples
writer.write(
{
'input_ids': ids,
'start_token': start,
'end_token': end,
'category': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
_lowerCamelCase : int = load_dataset('''natural_questions''')
_lowerCamelCase : Dict = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
_lowerCamelCase : List[str] = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
_lowerCamelCase : List[Any] = {
'''tokenizer''': tokenizer,
'''doc_stride''': DOC_STRIDE,
'''max_length''': MAX_LENGTH,
'''assertion''': False,
}
_lowerCamelCase : Optional[Any] = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
_lowerCamelCase : Tuple = data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
print(data)
np.random.seed(SEED)
_lowerCamelCase : List[str] = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
save_to_disk(data, file_name=cache_file_name) | 686 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a_ ( ) -> Optional[Any]:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def a_ ( ) -> Optional[int]:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def a_ ( ) -> Dict:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head('https://huggingface.co' ) | 686 | 1 |
def a_ ( __lowercase : int = 1_000 ) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , __lowercase + 1 ) )
if __name__ == "__main__":
print(solution()) | 686 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCamelCase : Optional[int] = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_lowerCamelCase : List[str] = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_lowerCamelCase : Dict = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def A ( self : Union[str, Any] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : int=None , lowercase : str=True , lowercase : List[str]=False ):
'''simple docstring'''
if rouge_types is None:
_snake_case = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
_snake_case = rouge_scorer.RougeScorer(rouge_types=lowercase , use_stemmer=lowercase )
if use_aggregator:
_snake_case = scoring.BootstrapAggregator()
else:
_snake_case = []
for ref, pred in zip(lowercase , lowercase ):
_snake_case = scorer.score(lowercase , lowercase )
if use_aggregator:
aggregator.add_scores(lowercase )
else:
scores.append(lowercase )
if use_aggregator:
_snake_case = aggregator.aggregate()
else:
_snake_case = {}
for key in scores[0]:
_snake_case = [score[key] for score in scores]
return result | 686 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
_lowerCamelCase : int = get_logger(__name__)
def a_ ( __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : List[str] , __lowercase : Optional[Any]=0 ) -> Optional[int]:
os.makedirs(__lowercase , exist_ok=__lowercase )
with FSDP.state_dict_type(
__lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
_snake_case = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_snake_case = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
_snake_case = os.path.join(__lowercase , __lowercase )
if accelerator.process_index == 0:
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(__lowercase , __lowercase )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_snake_case = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
_snake_case = os.path.join(__lowercase , __lowercase )
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(__lowercase , __lowercase )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_snake_case = os.path.join(__lowercase , f'''{MODEL_NAME}_{model_index}''' )
os.makedirs(__lowercase , exist_ok=__lowercase )
logger.info(f'''Saving model to {ckpt_dir}''' )
_snake_case = {'model': state_dict}
dist_cp.save_state_dict(
state_dict=__lowercase , storage_writer=dist_cp.FileSystemWriter(__lowercase ) , planner=DefaultSavePlanner() , )
logger.info(f'''Model saved to {ckpt_dir}''' )
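# Summary of the three StateDictType branches above: FULL_STATE_DICT gathers one
# consolidated checkpoint written by rank 0, LOCAL_STATE_DICT writes one shard
# file per rank, and SHARDED_STATE_DICT hands off to
# torch.distributed.checkpoint, which writes a directory of shards plus metadata.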
def a_ ( __lowercase : str , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any]=0 ) -> Union[str, Any]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(__lowercase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
_snake_case = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
_snake_case = os.path.join(__lowercase , __lowercase )
logger.info(f'''Loading model from {input_model_file}''' )
_snake_case = torch.load(__lowercase )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_snake_case = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
_snake_case = os.path.join(__lowercase , __lowercase )
logger.info(f'''Loading model from {input_model_file}''' )
_snake_case = torch.load(__lowercase )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_snake_case = (
os.path.join(__lowercase , f'''{MODEL_NAME}_{model_index}''' )
if f'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading model from {ckpt_dir}''' )
_snake_case = {'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=__lowercase , storage_reader=dist_cp.FileSystemReader(__lowercase ) , planner=DefaultLoadPlanner() , )
_snake_case = state_dict['model']
logger.info(f'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(__lowercase )
def a_ ( __lowercase : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : Any , __lowercase : Any , __lowercase : Tuple , __lowercase : Union[str, Any]=0 ) -> Union[str, Any]:
os.makedirs(__lowercase , exist_ok=__lowercase )
with FSDP.state_dict_type(
__lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
_snake_case = FSDP.optim_state_dict(__lowercase , __lowercase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
_snake_case = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
_snake_case = os.path.join(__lowercase , __lowercase )
logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(__lowercase , __lowercase )
logger.info(f'''Optimizer state saved in {output_optimizer_file}''' )
else:
_snake_case = os.path.join(__lowercase , f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(__lowercase , exist_ok=__lowercase )
logger.info(f'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(__lowercase ) , planner=DefaultSavePlanner() , )
logger.info(f'''Optimizer state saved in {ckpt_dir}''' )
def a_ ( __lowercase : Dict , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : int , __lowercase : Tuple , __lowercase : Dict=0 ) -> Any:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_snake_case = None
            # The check below should work, but currently it does not (mostly a PyTorch issue);
            # in the meantime we disable it at the cost of excess memory usage.
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
_snake_case = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
_snake_case = os.path.join(__lowercase , __lowercase )
logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' )
_snake_case = torch.load(__lowercase )
logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' )
else:
_snake_case = (
os.path.join(__lowercase , f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if f'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading Optimizer from {ckpt_dir}''' )
_snake_case = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(__lowercase ) , )
_snake_case = optim_state['optimizer']
logger.info(f'''Optimizer loaded from {ckpt_dir}''' )
_snake_case = FSDP.optim_state_dict_to_load(__lowercase , __lowercase , __lowercase )
optimizer.load_state_dict(__lowercase ) | 686 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = "swin2sr"
_UpperCAmelCase : Optional[int] = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[int] , lowercase : List[Any]=64 , lowercase : int=1 , lowercase : Union[str, Any]=3 , lowercase : Dict=180 , lowercase : List[Any]=[6, 6, 6, 6, 6, 6] , lowercase : Dict=[6, 6, 6, 6, 6, 6] , lowercase : List[Any]=8 , lowercase : List[str]=2.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : List[str]=False , lowercase : List[Any]=0.02 , lowercase : List[Any]=1E-5 , lowercase : Optional[int]=2 , lowercase : Tuple=1.0 , lowercase : List[Any]="1conv" , lowercase : List[Any]="pixelshuffle" , **lowercase : List[str] , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = embed_dim
_snake_case = depths
_snake_case = len(lowercase )
_snake_case = num_heads
_snake_case = window_size
_snake_case = mlp_ratio
_snake_case = qkv_bias
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = drop_path_rate
_snake_case = hidden_act
_snake_case = use_absolute_embeddings
_snake_case = layer_norm_eps
_snake_case = initializer_range
_snake_case = upscale
_snake_case = img_range
_snake_case = resi_connection
_snake_case = upsampler | 686 | 1 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
_lowerCamelCase : Union[str, Any] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_lowerCamelCase : Union[str, Any] = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
_lowerCamelCase : Optional[int] = '''zero2'''
_lowerCamelCase : List[Any] = '''zero3'''
_lowerCamelCase : Dict = [ZEROa, ZEROa]
def a_ ( __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Tuple ) -> Dict:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
    _snake_case = parameterized.to_safe_name('_'.join(str(x ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
_lowerCamelCase : Dict = list(itertools.product(stages, models.keys()))
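# With the values above this expands to four sub-tests:
# ("zero2", "base"), ("zero2", "robust"), ("zero3", "base"), ("zero3", "robust").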
@slow
@require_deepspeed
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : List[str] , lowercase : List[Any] , lowercase : Dict ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : Any , lowercase : str , lowercase : List[str] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : List[str] , lowercase : Optional[Any] , lowercase : Optional[int] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
def A ( self : List[str] , lowercase : Optional[Any] ):
'''simple docstring'''
pass
def A ( self : str , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : bool = True , lowercase : bool = True , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = models[model]
_snake_case = self.run_trainer(
stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
self.do_checks(lowercase )
return output_dir
def A ( self : Any , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : int = 1 , lowercase : bool = True , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
_snake_case = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_snake_case = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_snake_case = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_snake_case = self.get_launcher(lowercase )
_snake_case = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase , env=self.get_env() )
return output_dir
def A ( self : List[str] , lowercase : Any=False ):
'''simple docstring'''
_snake_case = min(2 , get_gpu_count() ) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split() | 686 |
import random
def partition( a : list , left_index : int , right_index : int ) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random( a : list , left : int , right : int ) -> None:
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        ) # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index ) # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right ) # recursive quicksort to the right of the pivot point
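# Choosing the pivot uniformly at random makes the expected running time
# O(n log n) on every input, sidestepping the O(n^2) worst case that a fixed
# leftmost pivot hits on already-sorted data.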
def main( ) -> None:
    user_input = input('Enter numbers separated by a comma:\n' ).strip()
    unsorted = [int(item ) for item in user_input.split(',' )]
    quick_sort_random(unsorted , 0 , len(unsorted ) )
    print(unsorted )
if __name__ == "__main__":
main() | 686 | 1 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
_lowerCamelCase : Any = datasets.load_iris()
_lowerCamelCase : Dict = np.array(data['''data'''])
_lowerCamelCase : Any = np.array(data['''target'''])
_lowerCamelCase : Dict = data['''target_names''']
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = train_test_split(X, y)
def euclidean_distance( a , b ) -> float:
    return np.linalg.norm(np.array(a ) - np.array(b ) )
def classifier( train_data , train_target , classes , point , k=5 ):
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
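# Note: this is brute-force k-NN — each query scans every training point, so a
# single classification costs O(len(train_data)) distance computations plus a
# sort; fine for Iris-scale data, while k-d trees or ball trees are the usual
# fix at larger scale.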
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 686 |
import math
def is_prime( number : int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
return True
def a_ ( __lowercase : float = 0.1 ) -> int:
_snake_case = 3
_snake_case = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__lowercase )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 1 |
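# Spot-check sketch: at the side length returned by solution(), the share of
# primes on the spiral diagonals has just dropped below the given threshold.
def _report_side_length(ratio: float = 0.1) -> None:
    print(f'Prime ratio first drops below {ratio:.0%} at spiral side length {solution(ratio)}')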
import math


def sieve(n: int) -> list[int]:
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6)) | 686 |
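# Cross-check sketch: the segmented sieve should agree with a naive
# trial-division filter on a small range.
def _check_sieve(limit: int = 10**4) -> bool:
    naive = [x for x in range(2, limit + 1) if all(x % d for d in range(2, int(math.sqrt(x)) + 1))]
    return sieve(limit) == naive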
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'resnet'
    layer_types = ['basic', 'bottleneck']

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type='bottleneck', hidden_act='relu', downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self) -> float:
return 1E-3 | 686 | 1 |
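# Usage sketch (run with transformers installed, outside this module):
#
#     from transformers import ResNetConfig
#     config = ResNetConfig(hidden_sizes=[64, 128, 256, 512], depths=[2, 2, 2, 2], layer_type='basic')
#     print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']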
import re


def dna(dna: str) -> str:
    if len(re.findall('[ATCG]', dna)) != len(dna):
        raise ValueError('Invalid Strand')

    return dna.translate(dna.maketrans('ATCG', 'TAGC'))
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 |
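# Complement round-trip sketch: applying the mapping twice restores the
# original strand, since A<->T and C<->G are mutual swaps.
assert dna(dna('ATCGTA')) == 'ATCGTA'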
import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision)
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1_024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2) | 686 | 1 |
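# The slice-comparison pattern used above, standalone: pull a small corner
# slice to host memory and compare against a pinned reference with a loose
# absolute tolerance (the zero arrays below are placeholders, not checkpoints).
def _slice_close_demo():
    import jax.numpy as jnp

    sample = jnp.zeros((4, 4, 64, 64))
    output_slice = jnp.asarray(sample[-1, -2:, -2:, :2].flatten(), dtype=jnp.float32)
    reference = jnp.zeros_like(output_slice)
    assert jnp.allclose(output_slice, reference, atol=1e-2)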
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        'wmt16-en-de-dist-12-1': [28.3, 27.52],
        'wmt16-en-de-dist-6-1': [27.4, 27.11],
        'wmt16-en-de-12-1': [26.9, 25.75],
    }
    pair = f'{src_lang}-{tgt_lang}'
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, 'README.md')
    print(f'Generating {path}')
    with open(path, 'w', encoding='utf-8') as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name) | 686 |
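# Dry-run sketch: write one card into a temporary directory instead of the
# repo's model_cards tree (reusing the metadata tables defined above).
def _write_demo_card():
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        write_model_card(
            Path(tmp) / 'allenai' / 'wmt16-en-de-12-1',
            src_lang='en', tgt_lang='de', model_name='wmt16-en-de-12-1',
        )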
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('moe_layer.experts.0', f'ffn.experts.expert_{expert_idx}')
            else:
                key = key.replace('moe_layer.experts.', 'ffn.experts.expert_')
        if "gate" in key:
            key = key.replace('.moe_layer.gate.wg', '.ffn.router.classifier')
        if "fc2" in key and "experts" not in key:
            key = key.replace('.fc2.', '.ffn.fc2.')
        if "fc1" in key and "experts" not in key:
            key = key.replace('.fc1.', '.ffn.fc1.')
        if ".encoder_attn." in key:
            key = key.replace('.encoder_attn.', '.cross_attention.')
        if "encoder_attn_layer_norm" in key:
            key = key.replace('encoder_attn_layer_norm', 'cross_attention_layer_norm')
        if "final_layer_norm" in key:
            key = key.replace('final_layer_norm', 'ff_layer_norm')
        new_dict[key] = state_dict[old_key]
    return new_dict
def a_ ( __lowercase : Optional[Any] , __lowercase : Tuple , __lowercase : Any , __lowercase : List[str] , __lowercase : str = WEIGHTS_NAME ) -> Union[str, Any]:
_snake_case = []
_snake_case = 0
os.makedirs(__lowercase , exist_ok=__lowercase )
for expert in range(__lowercase ):
_snake_case = switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(__lowercase ):
_snake_case = torch.load(__lowercase )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = os.path.join(
__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
torch.save(__lowercase , __lowercase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__lowercase )[0]].dtype )
# Add the last block
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
_snake_case = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = shared_weights['decoder.embed_tokens.weight']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__lowercase ) == 1:
_snake_case = os.path.join(__lowercase , __lowercase )
torch.save(__lowercase , __lowercase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__lowercase , __lowercase )
# Otherwise, let's build the index
_snake_case = {}
for idx, shard in enumerate(__lowercase ):
_snake_case = weights_name.replace('.bin' , f'''-{idx+1:05d}-of-{len(__lowercase ):05d}.bin''' )
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__lowercase , os.path.join(__lowercase , __lowercase ) )
for key in shard:
_snake_case = shard_file
# Add the metadata
_snake_case = {'total_size': total_size}
_snake_case = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(__lowercase , __lowercase ) , 'w' , encoding='utf-8' ) as f:
_snake_case = json.dumps(__lowercase , indent=2 , sort_keys=__lowercase ) + '\n'
f.write(__lowercase )
return metadata, index
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_lowerCamelCase : List[str] = parser.parse_args()
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_lowerCamelCase : Tuple = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_lowerCamelCase : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path) | 686 | 1 |
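# Toy sketch of the renaming idea used above: expert weights move from the
# fairseq 'moe_layer.experts.<i>' namespace to 'ffn.experts.expert_<i>'.
def _rename_expert_key(key: str, expert_idx: int) -> str:
    return key.replace('moe_layer.experts.0', f'ffn.experts.expert_{expert_idx}')

assert _rename_expert_key('layers.0.moe_layer.experts.0.fc1.weight', 3) == 'layers.0.ffn.experts.expert_3.fc1.weight'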
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50_257, n_positions=1_024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) | 686 |
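# Attribute-map sketch (run with transformers installed, outside this module):
#
#     from transformers import GPTBigCodeConfig
#     cfg = GPTBigCodeConfig(n_embd=2048, n_layer=24)
#     assert cfg.hidden_size == 2048 and cfg.num_hidden_layers == 24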
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]') | 686 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]
def __init__( self : Optional[int] , lowercase : Optional[Any]=64 , lowercase : Union[str, Any]=48_000 , lowercase : str=480 , lowercase : Optional[Any]=10 , lowercase : Optional[Any]=1_024 , lowercase : Optional[int]=0.0 , lowercase : List[Any]=False , lowercase : float = 0 , lowercase : float = 14_000 , lowercase : int = None , lowercase : str = "fusion" , lowercase : str = "repeatpad" , **lowercase : Any , ):
'''simple docstring'''
super().__init__(
feature_size=lowercase , sampling_rate=lowercase , padding_value=lowercase , return_attention_mask=lowercase , **lowercase , )
_snake_case = top_db
_snake_case = truncation
_snake_case = padding
_snake_case = fft_window_size
_snake_case = (fft_window_size >> 1) + 1
_snake_case = hop_length
_snake_case = max_length_s
_snake_case = max_length_s * sampling_rate
_snake_case = sampling_rate
_snake_case = frequency_min
_snake_case = frequency_max
_snake_case = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowercase , min_frequency=lowercase , max_frequency=lowercase , sampling_rate=lowercase , norm=lowercase , mel_scale='htk' , )
_snake_case = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowercase , min_frequency=lowercase , max_frequency=lowercase , sampling_rate=lowercase , norm='slaney' , mel_scale='slaney' , )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = copy.deepcopy(self.__dict__ )
_snake_case = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def A ( self : Optional[int] , lowercase : np.array , lowercase : Optional[np.array] = None ):
'''simple docstring'''
_snake_case = spectrogram(
lowercase , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowercase , log_mel='dB' , )
return log_mel_spectrogram.T
def A ( self : Tuple , lowercase : Any , lowercase : Optional[int] , lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_snake_case = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_snake_case = [0]
# randomly choose index for each part
_snake_case = np.random.choice(ranges[0] )
_snake_case = np.random.choice(ranges[1] )
_snake_case = np.random.choice(ranges[2] )
_snake_case = mel[idx_front : idx_front + chunk_frames, :]
_snake_case = mel[idx_middle : idx_middle + chunk_frames, :]
_snake_case = mel[idx_back : idx_back + chunk_frames, :]
_snake_case = torch.tensor(mel[None, None, :] )
_snake_case = torch.nn.functional.interpolate(
lowercase , size=[chunk_frames, 64] , mode='bilinear' , align_corners=lowercase )
_snake_case = mel_shrink[0][0].numpy()
_snake_case = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def A ( self : Tuple , lowercase : np.array , lowercase : Any , lowercase : Optional[int] , lowercase : Dict ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_snake_case = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_snake_case = len(lowercase ) - max_length
_snake_case = np.random.randint(0 , overflow + 1 )
_snake_case = waveform[idx : idx + max_length]
_snake_case = self._np_extract_fbank_features(lowercase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_snake_case = self._np_extract_fbank_features(lowercase , self.mel_filters )
_snake_case = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_snake_case = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_snake_case = np.stack([mel, mel, mel, mel] , axis=0 )
_snake_case = False
else:
_snake_case = self._random_mel_fusion(lowercase , lowercase , lowercase )
_snake_case = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
_snake_case = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_snake_case = int(max_length / len(lowercase ) )
_snake_case = np.stack(np.tile(lowercase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_snake_case = int(max_length / len(lowercase ) )
_snake_case = np.stack(np.tile(lowercase , lowercase ) )
_snake_case = np.pad(lowercase , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_snake_case = self._np_extract_fbank_features(lowercase , self.mel_filters )
_snake_case = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_snake_case = self._np_extract_fbank_features(lowercase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Dict , lowercase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowercase : str = None , lowercase : Optional[str] = None , lowercase : Optional[int] = None , lowercase : Optional[int] = None , lowercase : Optional[Union[str, TensorType]] = None , **lowercase : str , ):
'''simple docstring'''
_snake_case = truncation if truncation is not None else self.truncation
_snake_case = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
# convert to mel spectrogram, truncate and pad if needed.
_snake_case = [
self._get_input_mel(lowercase , max_length if max_length else self.nb_max_samples , lowercase , lowercase )
for waveform in raw_speech
]
_snake_case = []
_snake_case = []
for mel, longer in padded_inputs:
input_mel.append(lowercase )
is_longer.append(lowercase )
if truncation == "fusion" and sum(lowercase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_snake_case = np.random.randint(0 , len(lowercase ) )
_snake_case = True
if isinstance(input_mel[0] , lowercase ):
_snake_case = [np.asarray(lowercase , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_snake_case = [[longer] for longer in is_longer]
_snake_case = {'input_features': input_mel, 'is_longer': is_longer}
_snake_case = BatchFeature(lowercase )
if return_tensors is not None:
_snake_case = input_features.convert_to_tensors(lowercase )
return input_features | 686 |
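# Standalone sketch of the "repeatpad" strategy handled above: tile a short
# clip until it fills the target length, then zero-pad the remainder.
def _repeatpad_demo(waveform, max_length):
    n_repeat = int(max_length / len(waveform))
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - tiled.shape[0]), mode='constant', constant_values=0)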
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_lowerCamelCase : Dict = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"
def __init__( self : Optional[int] , lowercase : Any ):
'''simple docstring'''
if type(lowercase ) == dict:
_snake_case = Namespace(**lowercase )
_snake_case = glue_output_modes[hparams.task]
_snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(lowercase , lowercase , self.mode )
def A ( self : Optional[Any] , **lowercase : Optional[Any] ):
'''simple docstring'''
return self.model(**lowercase )
def A ( self : Optional[Any] , lowercase : str , lowercase : Tuple ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case = outputs[0]
_snake_case = self.trainer.lr_schedulers[0]['scheduler']
_snake_case = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.hparams
_snake_case = processors[args.task]()
_snake_case = processor.get_labels()
for mode in ["train", "dev"]:
_snake_case = self._feature_file(lowercase )
if os.path.exists(lowercase ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , lowercase )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_snake_case = convert_examples_to_features(
lowercase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , lowercase )
torch.save(lowercase , lowercase )
def A ( self : Dict , lowercase : str , lowercase : int , lowercase : bool = False ):
'''simple docstring'''
_snake_case = 'dev' if mode == 'test' else mode
_snake_case = self._feature_file(lowercase )
logger.info('Loading features from cached file %s' , lowercase )
_snake_case = torch.load(lowercase )
_snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(lowercase , lowercase , lowercase , lowercase ) , batch_size=lowercase , shuffle=lowercase , )
def A ( self : str , lowercase : Optional[Any] , lowercase : str ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case , _snake_case = outputs[:2]
_snake_case = logits.detach().cpu().numpy()
_snake_case = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : int , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_snake_case = np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_snake_case = np.argmax(lowercase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_snake_case = np.squeeze(lowercase )
_snake_case = np.concatenate([x['target'] for x in outputs] , axis=0 )
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , lowercase , lowercase )}
_snake_case = dict(results.items() )
_snake_case = results
return ret, preds_list, out_label_list
def A ( self : int , lowercase : list ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : List[str] , lowercase : Any ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( lowercase : Tuple , lowercase : Any ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(lowercase , lowercase )
parser.add_argument(
'--max_seq_length' , default=128 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=lowercase , required=lowercase , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=lowercase , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            './results',
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main() | 686 | 1 |
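# Feature-tensor sketch mirroring the dataloader construction above: padded
# ids, masks, token types and labels packed into a TensorDataset (shapes
# below are illustrative only).
def _toy_dataloader():
    ids = torch.randint(0, 100, (8, 16))
    mask = torch.ones_like(ids)
    token_type_ids = torch.zeros_like(ids)
    labels = torch.randint(0, 2, (8,))
    return DataLoader(TensorDataset(ids, mask, token_type_ids, labels), batch_size=4)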
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowerCamelCase : Union[str, Any] = logging.getLogger(__name__)
def a_ ( __lowercase : torch.nn.Module , __lowercase : BnbQuantizationConfig , __lowercase : Union[str, os.PathLike] = None , __lowercase : Optional[Dict[str, Union[int, str, torch.device]]] = None , __lowercase : Optional[List[str]] = None , __lowercase : Optional[Dict[Union[int, str], Union[int, str]]] = None , __lowercase : Optional[Union[str, os.PathLike]] = None , __lowercase : bool = False , ) -> Union[str, Any]:
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            'make sure you have the latest version of `bitsandbytes` installed.' )
_snake_case = []
# custom device map
if isinstance(__lowercase , __lowercase ) and len(device_map.keys() ) > 1:
_snake_case = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
_snake_case = get_keys_to_not_convert(__lowercase )
# add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
bnb_quantization_config.skip_modules.extend(__lowercase )
_snake_case = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
_snake_case = []
_snake_case = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__lowercase )
# compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
_snake_case = get_parameter_device(__lowercase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
_snake_case = replace_with_bnb_layers(__lowercase , __lowercase , modules_to_not_convert=__lowercase )
# convert param to the right dtype
_snake_case = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
_snake_case = name.replace('.weight' , '' ).replace('.bias' , '' )
_snake_case = getattr(__lowercase , __lowercase , __lowercase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__lowercase ):
param.to(__lowercase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
_snake_case = replace_with_bnb_layers(
__lowercase , __lowercase , modules_to_not_convert=__lowercase )
_snake_case = get_quantized_model_device_map(
__lowercase , __lowercase , __lowercase , max_memory=__lowercase , no_split_module_classes=__lowercase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
_snake_case = True
_snake_case = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
        __lowercase , __lowercase , __lowercase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__lowercase , offload_state_dict=__lowercase , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
return dispatch_model(__lowercase , device_map=__lowercase , offload_dir=__lowercase )
def a_ ( __lowercase : Optional[int] , __lowercase : str , __lowercase : Any=None , __lowercase : List[Any]=None , __lowercase : Union[str, Any]=None ) -> int:
if device_map is None:
if torch.cuda.is_available():
_snake_case = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(__lowercase , __lowercase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
_snake_case = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
_snake_case = {}
_snake_case = special_dtypes
_snake_case = no_split_module_classes
_snake_case = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
_snake_case = get_balanced_memory(
__lowercase , low_zero=(device_map == 'balanced_low_0') , max_memory=__lowercase , **__lowercase , )
_snake_case = max_memory
_snake_case = infer_auto_device_map(__lowercase , **__lowercase )
if isinstance(__lowercase , __lowercase ):
# check if don't have any quantized module on the cpu
_snake_case = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
_snake_case = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def a_ ( __lowercase : Dict , __lowercase : Optional[Any] , __lowercase : Any=None , __lowercase : Tuple=None ) -> Tuple:
if modules_to_not_convert is None:
_snake_case = []
_snake_case , _snake_case = _replace_with_bnb_layers(
__lowercase , __lowercase , __lowercase , __lowercase )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def a_ ( __lowercase : List[Any] , __lowercase : Dict , __lowercase : List[Any]=None , __lowercase : List[str]=None , ) -> Optional[int]:
_snake_case = False
for name, module in model.named_children():
if current_key_name is None:
_snake_case = []
current_key_name.append(__lowercase )
if isinstance(__lowercase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
_snake_case = '.'.join(__lowercase )
_snake_case = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
_snake_case = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model , name , bnb_module )
                has_been_replaced = True
if len(list(module.children() ) ) > 0:
_snake_case , _snake_case = _replace_with_bnb_layers(
__lowercase , __lowercase , __lowercase , __lowercase )
_snake_case = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def a_ ( __lowercase : Any ) -> Optional[int]:
# Create a copy of the model
with init_empty_weights():
_snake_case = deepcopy(__lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
_snake_case = find_tied_parameters(__lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowercase , __lowercase ):
_snake_case = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_snake_case = sum(__lowercase , [] )
_snake_case = len(__lowercase ) > 0
# Check if it is a base model
_snake_case = False
if hasattr(__lowercase , 'base_model_prefix' ):
_snake_case = not hasattr(__lowercase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_snake_case = list(model.named_children() )
_snake_case = [list_modules[-1][0]]
# add last module together with tied weights
_snake_case = set(__lowercase ) - set(__lowercase )
_snake_case = list(set(__lowercase ) ) + list(__lowercase )
# remove ".weight" from the keys
_snake_case = ['.weight', '.bias']
_snake_case = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_snake_case = name.replace(__lowercase , '' )
filtered_module_names.append(__lowercase )
return filtered_module_names
def a_ ( __lowercase : List[str] ) -> List[Any]:
for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
return True
return False
def a_ ( __lowercase : nn.Module ) -> Any:
return next(parameter.parameters() ).device
def a_ ( __lowercase : List[str] , __lowercase : Any , __lowercase : List[str] , __lowercase : Union[str, Any] , __lowercase : str , __lowercase : Any , __lowercase : Tuple ) -> Tuple:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(__lowercase , __lowercase , 0 , dtype=__lowercase , value=__lowercase )
_snake_case = param_name
_snake_case = model
if "." in tensor_name:
_snake_case = tensor_name.split('.' )
for split in splits[:-1]:
_snake_case = getattr(__lowercase , __lowercase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
_snake_case = new_module
_snake_case = splits[-1]
# offload weights
_snake_case = False
offload_weight(module._parameters[tensor_name] , __lowercase , __lowercase , index=__lowercase )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , __lowercase , index=__lowercase , )
else:
offload_weight(__lowercase , __lowercase , __lowercase , index=__lowercase )
offload_weight(__lowercase , param_name.replace('weight' , 'SCB' ) , __lowercase , index=__lowercase )
set_module_tensor_to_device(__lowercase , __lowercase , 'meta' , dtype=__lowercase , value=torch.empty(*param.size() ) ) | 686 |
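# Minimal sketch of the recursive replacement pattern above, swapping
# nn.Linear children for a stand-in class (a real run would use bnb layers).
class _StubQuantLinear(nn.Linear):
    pass


def _replace_linears(model):
    for name, module in model.named_children():
        if isinstance(module, nn.Linear):
            stub = _StubQuantLinear(module.in_features, module.out_features, module.bias is not None)
            setattr(model, name, stub)
        elif len(list(module.children())) > 0:
            _replace_linears(module)


_net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
_replace_linears(_net)
assert all(isinstance(m, _StubQuantLinear) for m in _net if isinstance(m, nn.Linear))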
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1, )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : str=None , ) -> Union[str, Any]:
if attention_mask is None:
_snake_case = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_snake_case = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_snake_case = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_UpperCAmelCase : Optional[int] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase : Tuple = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : str = True
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : str = False
_UpperCAmelCase : List[Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = TFLEDModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = tf.zeros_like(inputs_dict['attention_mask'] )
_snake_case = 2
_snake_case = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
_snake_case = True
_snake_case = self.model_tester.seq_length
_snake_case = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase : List[str] ):
_snake_case = outputs.decoder_attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase : List[str] ):
_snake_case = [t.numpy() for t in outputs.encoder_attentions]
_snake_case = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
_snake_case = len(lowercase )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
if self.is_encoder_decoder:
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_decoder_attentions_output(lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase ) )
self.assertEqual(model.config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def A ( self : List[Any] ):
'''simple docstring'''
pass
def A ( self : Any ):
'''simple docstring'''
pass
def a_ ( __lowercase : str ) -> Optional[Any]:
return tf.constant(__lowercase , dtype=tf.intaa )
_lowerCamelCase : List[Any] = 1E-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 )
def A ( self : str ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 , rtol=1E-3 ) | 686 | 1 |
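A minimal sketch of the window-padding arithmetic the tester above relies on: LED pads the encoder input so its length becomes a multiple of `attention_window` (the helper name here is illustrative, not part of the test file).

def padded_encoder_length(seq_length: int, attention_window: int) -> int:
    # round seq_length up to the next multiple of the attention window;
    # lengths that are already aligned stay unchanged
    return seq_length + (attention_window - seq_length % attention_window) % attention_window

assert padded_encoder_length(7, 4) == 8
assert padded_encoder_length(8, 4) == 8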
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : int = LDMTextToImagePipeline
_UpperCAmelCase : Tuple = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_UpperCAmelCase : Dict = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase : List[str] = False
def A ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
_snake_case = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
_snake_case = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
_snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
_snake_case = CLIPTextModel(lowercase )
_snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_snake_case = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def A ( self : List[str] , lowercase : Optional[int] , lowercase : Any=0 ):
'''simple docstring'''
if str(lowercase ).startswith('mps' ):
_snake_case = torch.manual_seed(lowercase )
else:
_snake_case = torch.Generator(device=lowercase ).manual_seed(lowercase )
_snake_case = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def A ( self : str ):
'''simple docstring'''
_snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = LDMTextToImagePipeline(**lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = self.get_dummy_inputs(lowercase )
_snake_case = pipe(**lowercase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_snake_case = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[int] , lowercase : Tuple , lowercase : str=torch.floataa , lowercase : Dict=0 ):
'''simple docstring'''
_snake_case = torch.manual_seed(lowercase )
_snake_case = np.random.RandomState(lowercase ).standard_normal((1, 4, 32, 32) )
_snake_case = torch.from_numpy(lowercase ).to(device=lowercase , dtype=lowercase )
_snake_case = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = self.get_inputs(lowercase )
_snake_case = pipe(**lowercase ).images
_snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
_snake_case = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878] )
_snake_case = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Tuple , lowercase : Tuple , lowercase : Optional[Any]=torch.floataa , lowercase : Tuple=0 ):
'''simple docstring'''
_snake_case = torch.manual_seed(lowercase )
_snake_case = np.random.RandomState(lowercase ).standard_normal((1, 4, 32, 32) )
_snake_case = torch.from_numpy(lowercase ).to(device=lowercase , dtype=lowercase )
_snake_case = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def A ( self : str ):
'''simple docstring'''
_snake_case = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = self.get_inputs(lowercase )
_snake_case = pipe(**lowercase ).images[0]
_snake_case = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
_snake_case = np.abs(expected_image - image ).max()
assert max_diff < 1E-3 | 686 |
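The pipeline tests above create the RNG per device because the `mps` backend does not accept a device-bound `torch.Generator`; the pattern in isolation (the helper name is illustrative):

import torch

def seeded_generator(device: str, seed: int = 0) -> torch.Generator:
    # on mps, fall back to the global CPU generator seeded via torch.manual_seed
    if str(device).startswith('mps'):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)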
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
_lowerCamelCase : Union[str, Any] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_lowerCamelCase : Union[str, Any] = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
_lowerCamelCase : Optional[int] = '''zero2'''
_lowerCamelCase : List[Any] = '''zero3'''
_lowerCamelCase : Dict = [ZEROa, ZEROa]
def a_ ( func : Union[str, Any] , param_num : Union[str, Any] , param : Tuple ) -> str:
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, since by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x) for x in param.args ) )
    return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
_lowerCamelCase : Dict = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : List[str] , lowercase : List[Any] , lowercase : Dict ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : Any , lowercase : str , lowercase : List[str] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : List[str] , lowercase : Optional[Any] , lowercase : Optional[int] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ):
'''simple docstring'''
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
def A ( self : List[str] , lowercase : Optional[Any] ):
'''simple docstring'''
pass
def A ( self : str , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : bool = True , lowercase : bool = True , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = models[model]
_snake_case = self.run_trainer(
stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
self.do_checks(lowercase )
return output_dir
def A ( self : Any , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : int = 1 , lowercase : bool = True , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
_snake_case = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_snake_case = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_snake_case = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_snake_case = self.get_launcher(lowercase )
_snake_case = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase , env=self.get_env() )
return output_dir
def A ( self : List[str] , lowercase : Any=False ):
'''simple docstring'''
_snake_case = min(2 , get_gpu_count() ) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split() | 686 | 1 |
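A standalone sketch of how the launcher, script, and argument fragments above compose into one subprocess command (the paths and config file name here are illustrative, not taken from the test):

num_gpus = 2
launcher = f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
script = ['examples/research_projects/wav2vec2/run_asr.py']  # assumed location
args = ['--output_dir', '/tmp/out', '--fp16']
ds_args = ['--deepspeed', 'ds_config_wav2vec2_zero2.json']  # assumed config name
cmd = launcher + script + args + ds_args  # handed to execute_subprocess_async above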
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
_lowerCamelCase : Optional[Any] = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present(transformers_path: Path) -> bool:
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('''transformers''')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''') | 686 |
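The presence check generalizes to any package layout; a small sketch that reports which expected files are missing instead of returning a bare boolean (the function name is illustrative):

from pathlib import Path

def missing_files(root: Path, expected: list) -> list:
    # return every expected relative path that does not exist under root
    return [f for f in expected if not (root / f).exists()]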
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowerCamelCase : int = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 1 |
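The init file above follows the library's lazy-import convention: public names are declared in `_import_structure` and only resolved on first attribute access, keeping the top-level import cheap. A simplified sketch of the idea (this is not the actual `_LazyModule` implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # find the submodule that declares `attr`, import it, and delegate
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f'.{submodule}', self.__name__)
                return getattr(module, attr)
        raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')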
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = "beit"
def __init__( self : Any , lowercase : Any=8_192 , lowercase : str=768 , lowercase : str=12 , lowercase : Optional[int]=12 , lowercase : str=3_072 , lowercase : Any="gelu" , lowercase : Union[str, Any]=0.0 , lowercase : Optional[int]=0.0 , lowercase : Optional[int]=0.02 , lowercase : Tuple=1E-12 , lowercase : str=224 , lowercase : Optional[Any]=16 , lowercase : List[str]=3 , lowercase : List[str]=False , lowercase : str=False , lowercase : Optional[int]=False , lowercase : List[str]=False , lowercase : List[Any]=0.1 , lowercase : int=0.1 , lowercase : Tuple=True , lowercase : int=[3, 5, 7, 11] , lowercase : Any=[1, 2, 3, 6] , lowercase : int=True , lowercase : Dict=0.4 , lowercase : Union[str, Any]=256 , lowercase : Optional[Any]=1 , lowercase : int=False , lowercase : Dict=255 , **lowercase : List[str] , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = use_mask_token
_snake_case = use_absolute_position_embeddings
_snake_case = use_relative_position_bias
_snake_case = use_shared_relative_position_bias
_snake_case = layer_scale_init_value
_snake_case = drop_path_rate
_snake_case = use_mean_pooling
# decode head attributes (semantic segmentation)
_snake_case = out_indices
_snake_case = pool_scales
# auxiliary head attributes (semantic segmentation)
_snake_case = use_auxiliary_head
_snake_case = auxiliary_loss_weight
_snake_case = auxiliary_channels
_snake_case = auxiliary_num_convs
_snake_case = auxiliary_concat_input
_snake_case = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = version.parse("1.11" )
@property
def A ( self : int ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : str ):
'''simple docstring'''
return 1E-4 | 686 |
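The `inputs` property above supplies the dynamic-axes declaration of an ONNX export; a sketch of how such a mapping is typically consumed (the export call is shown commented out, with argument names assumed from the standard torch.onnx API):

dynamic_axes = {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
# torch.onnx.export(model, (pixel_values,), 'beit.onnx',
#                   input_names=['pixel_values'], dynamic_axes=dynamic_axes)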
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n: int, prec: int = 1_000) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # keep d integral so the modular exponentiation below stays exact
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i))) | 686 | 1 |
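The `bin_exp_mod` helper imported above is modular exponentiation by repeated squaring; a minimal sketch consistent with how it is called here (the sibling module's actual implementation may differ):

def bin_exp_mod(a: int, n: int, m: int) -> int:
    # compute (a ** n) % m in O(log n) multiplications
    result = 1
    a %= m
    while n > 0:
        if n % 2 == 1:
            result = result * a % m
        a = a * a % m
        n //= 2
    return result

assert bin_exp_mod(3, 4, 5) == pow(3, 4, 5)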
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = StableDiffusionInpaintPipeline
_UpperCAmelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_UpperCAmelCase : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_UpperCAmelCase : Union[str, Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_UpperCAmelCase : Union[str, Any] = frozenset([] )
def A ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowercase , )
_snake_case = PNDMScheduler(skip_prk_steps=lowercase )
torch.manual_seed(0 )
_snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
_snake_case = CLIPTextModel(lowercase )
_snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_snake_case = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def A ( self : Optional[int] , lowercase : Tuple , lowercase : Dict=0 ):
'''simple docstring'''
_snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
_snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_snake_case = Image.fromarray(np.uinta(lowercase ) ).convert('RGB' ).resize((64, 64) )
_snake_case = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((64, 64) )
if str(lowercase ).startswith('mps' ):
_snake_case = torch.manual_seed(lowercase )
else:
_snake_case = torch.Generator(device=lowercase ).manual_seed(lowercase )
_snake_case = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def A ( self : Any ):
'''simple docstring'''
_snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = StableDiffusionInpaintPipeline(**lowercase )
_snake_case = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
_snake_case = self.get_dummy_inputs(lowercase )
_snake_case = sd_pipe(**lowercase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : str ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Dict ):
'''simple docstring'''
_snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
_snake_case = 'stabilityai/stable-diffusion-2-inpainting'
_snake_case = StableDiffusionInpaintPipeline.from_pretrained(lowercase , safety_checker=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
_snake_case = 'Face of a yellow cat, high resolution, sitting on a park bench'
_snake_case = torch.manual_seed(0 )
_snake_case = pipe(
prompt=lowercase , image=lowercase , mask_image=lowercase , generator=lowercase , output_type='np' , )
_snake_case = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
_snake_case = 'stabilityai/stable-diffusion-2-inpainting'
_snake_case = StableDiffusionInpaintPipeline.from_pretrained(
lowercase , torch_dtype=torch.floataa , safety_checker=lowercase , )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
_snake_case = 'Face of a yellow cat, high resolution, sitting on a park bench'
_snake_case = torch.manual_seed(0 )
_snake_case = pipe(
prompt=lowercase , image=lowercase , mask_image=lowercase , generator=lowercase , output_type='np' , )
_snake_case = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def A ( self : Tuple ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_snake_case = 'stabilityai/stable-diffusion-2-inpainting'
_snake_case = PNDMScheduler.from_pretrained(lowercase , subfolder='scheduler' )
_snake_case = StableDiffusionInpaintPipeline.from_pretrained(
lowercase , safety_checker=lowercase , scheduler=lowercase , torch_dtype=torch.floataa , )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_snake_case = 'Face of a yellow cat, high resolution, sitting on a park bench'
_snake_case = torch.manual_seed(0 )
_snake_case = pipe(
prompt=lowercase , image=lowercase , mask_image=lowercase , generator=lowercase , num_inference_steps=2 , output_type='np' , )
_snake_case = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9 | 686 |
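The last test above bounds peak GPU memory after enabling attention slicing and sequential CPU offload; the measurement pattern in isolation (the 2.65 GB threshold is the one asserted above):

import torch

torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
# ... run the offloaded pipeline ...
mem_bytes = torch.cuda.max_memory_allocated()
assert mem_bytes < 2.65 * 10**9  # stays under roughly 2.65 GB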
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_lowerCamelCase : int = re.compile(r'''\s+''')
def a_ ( __lowercase : List[Any] ) -> int:
return {"hash": hashlib.mda(re.sub(__lowercase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def a_ ( __lowercase : List[Any] ) -> Dict:
_snake_case = [len(__lowercase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(__lowercase ), "line_max": max(__lowercase )}
def a_ ( __lowercase : Optional[int] ) -> List[str]:
_snake_case = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] ) -> Optional[int]:
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def a_ ( __lowercase : Union[str, Any] , __lowercase : int=5 ) -> Optional[Any]:
_snake_case = ['auto-generated', 'autogenerated', 'automatically generated']
_snake_case = example['content'].splitlines()
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a_ ( __lowercase : List[Any] , __lowercase : int=5 , __lowercase : Tuple=0.0_5 ) -> Union[str, Any]:
_snake_case = ['unit tests', 'test file', 'configuration file']
_snake_case = example['content'].splitlines()
_snake_case = 0
_snake_case = 0
# first test
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_snake_case = example['content'].count('\n' )
_snake_case = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a_ ( __lowercase : Union[str, Any] ) -> Any:
_snake_case = ['def ', 'class ', 'for ', 'while ']
_snake_case = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a_ ( __lowercase : Tuple , __lowercase : Any=4 ) -> List[str]:
_snake_case = example['content'].splitlines()
_snake_case = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a_ ( __lowercase : Dict ) -> Dict:
_snake_case = tokenizer(example['content'] , truncation=__lowercase )['input_ids']
_snake_case = len(example['content'] ) / len(__lowercase )
return {"ratio": ratio}
def a_ ( __lowercase : Optional[Any] ) -> Any:
_snake_case = {}
results.update(get_hash(__lowercase ) )
results.update(line_stats(__lowercase ) )
results.update(alpha_stats(__lowercase ) )
results.update(char_token_ratio(__lowercase ) )
results.update(is_autogenerated(__lowercase ) )
results.update(is_config_or_test(__lowercase ) )
results.update(has_no_keywords(__lowercase ) )
results.update(has_few_assignments(__lowercase ) )
return results
def a_ ( __lowercase : Optional[int] , __lowercase : str , __lowercase : List[Any] ) -> int:
if not check_uniques(__lowercase , __lowercase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a_ ( __lowercase : Dict ) -> Dict:
with open(__lowercase , 'rb' ) as f_in:
with gzip.open(str(__lowercase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(__lowercase , __lowercase )
os.unlink(__lowercase )
# Settings
_lowerCamelCase : Dict = HfArgumentParser(PreprocessingArguments)
_lowerCamelCase : Dict = parser.parse_args()
if args.num_workers is None:
_lowerCamelCase : int = multiprocessing.cpu_count()
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_lowerCamelCase : Any = time.time()
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
_lowerCamelCase : Optional[int] = time.time()
_lowerCamelCase : Union[str, Any] = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
_lowerCamelCase : List[Any] = set(ds.unique('''hash'''))
_lowerCamelCase : Dict = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
_lowerCamelCase : List[Any] = time.time()
_lowerCamelCase : Optional[int] = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_lowerCamelCase : Union[str, Any] = time.time()
_lowerCamelCase , _lowerCamelCase : Dict = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
print(F'Size of deduplicate dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
_lowerCamelCase : Optional[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
_lowerCamelCase : int = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
_lowerCamelCase : Union[str, Any] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_lowerCamelCase : Dict = str(data_dir / F'file-{file_number+1:012}.json')
_lowerCamelCase : str = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}') | 686 | 1 |
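The exact-deduplication step above keys each file on an MD5 hash of its whitespace-stripped content and keeps only the first occurrence per hash; the core idea in isolation:

import hashlib
import re

PATTERN = re.compile(r'\s+')
seen = set()

def keep_first(text: str) -> bool:
    # hash the content with all whitespace removed, drop repeats
    h = hashlib.md5(re.sub(PATTERN, '', text).encode('utf-8')).hexdigest()
    if h in seen:
        return False
    seen.add(h)
    return True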
from jiwer import compute_measures
import datasets
_lowerCamelCase : Optional[Any] = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_lowerCamelCase : int = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
_lowerCamelCase : List[str] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Any ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
def A ( self : Optional[int] , lowercase : Tuple=None , lowercase : Any=None , lowercase : int=False ):
'''simple docstring'''
if concatenate_texts:
return compute_measures(lowercase , lowercase )["wer"]
else:
_snake_case = 0
_snake_case = 0
for prediction, reference in zip(lowercase , lowercase ):
_snake_case = compute_measures(lowercase , lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 686 |
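A worked instance of the docstring example above, assuming the standard Levenshtein alignment: the first pair contributes S=1, D=0, I=0 over 4 reference words, and the second pair ('an other sample' vs 'another one') contributes S=2, D=0, I=1 over 4 reference words, giving the expected score of 0.5:

errors = (1 + 0 + 0) + (2 + 0 + 1)  # substitutions + deletions + insertions, per pair
ref_words = 4 + 4                   # hits + substitutions + deletions, per pair
assert errors / ref_words == 0.5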
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : int = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = "yolos"
def __init__( self : int , lowercase : List[str]=768 , lowercase : Tuple=12 , lowercase : int=12 , lowercase : int=3_072 , lowercase : Optional[int]="gelu" , lowercase : str=0.0 , lowercase : Optional[int]=0.0 , lowercase : Optional[Any]=0.02 , lowercase : List[str]=1E-12 , lowercase : Dict=[512, 864] , lowercase : Union[str, Any]=16 , lowercase : List[Any]=3 , lowercase : List[str]=True , lowercase : Optional[int]=100 , lowercase : int=True , lowercase : Dict=False , lowercase : str=1 , lowercase : int=5 , lowercase : Tuple=2 , lowercase : List[str]=5 , lowercase : Any=2 , lowercase : List[str]=0.1 , **lowercase : int , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = qkv_bias
_snake_case = num_detection_tokens
_snake_case = use_mid_position_embeddings
_snake_case = auxiliary_loss
# Hungarian matcher
_snake_case = class_cost
_snake_case = bbox_cost
_snake_case = giou_cost
# Loss coefficients
_snake_case = bbox_loss_coefficient
_snake_case = giou_loss_coefficient
_snake_case = eos_coefficient
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = version.parse("1.11" )
@property
def A ( self : str ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Any ):
'''simple docstring'''
return 1E-4
@property
def A ( self : List[Any] ):
'''simple docstring'''
return 12 | 686 | 1 |
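With the defaults above, the transformer's sequence length follows from the patch grid plus the detection tokens and a [CLS] token (the [CLS] term is an assumption carried over from the ViT-style embedding YOLOS builds on):

image_size = (512, 864)
patch_size = 16
num_patches = (image_size[0] // patch_size) * (image_size[1] // patch_size)  # 32 * 54 = 1728
seq_len = 1 + num_patches + 100  # [CLS] + patches + num_detection_tokens = 1829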
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F'{solution() = }') | 686 |
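The original counting problem gives known reference values that make a quick sanity check possible (a row of length 7 has exactly seventeen arrangements):

assert solution(3) == 2
assert solution(5) == 7
assert solution(7) == 17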
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_lowerCamelCase : Tuple = logging.get_logger(__name__)
# General docstring
_lowerCamelCase : Union[str, Any] = '''ResNetConfig'''
# Base docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[Any] = [1, 2_048, 7, 7]
# Image classification docstring
_lowerCamelCase : int = '''microsoft/resnet-50'''
_lowerCamelCase : Optional[int] = '''tiger cat'''
_lowerCamelCase : str = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 3 , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(
lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=kernel_size // 2 , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def A ( self : Union[str, Any] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def A ( self : Tuple , lowercase : Tensor ):
'''simple docstring'''
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_snake_case = self.embedder(lowercase )
_snake_case = self.pooler(lowercase )
return embedding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase : int , lowercase : int , lowercase : int = 2 ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Convad(lowercase , lowercase , kernel_size=1 , stride=lowercase , bias=lowercase )
_snake_case = nn.BatchNormad(lowercase )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = self.convolution(lowercase )
_snake_case = self.normalization(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : List[str] , lowercase : List[str] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : int = 1 , lowercase : str = "relu" , lowercase : int = 4 ):
'''simple docstring'''
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , kernel_size=1 ) , ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , kernel_size=1 , activation=lowercase ) , )
_snake_case = ACTaFN[activation]
def A ( self : Dict , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = hidden_state
_snake_case = self.layer(lowercase )
_snake_case = self.shortcut(lowercase )
hidden_state += residual
_snake_case = self.activation(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowercase : ResNetConfig , lowercase : int , lowercase : int , lowercase : int = 2 , lowercase : int = 2 , ):
'''simple docstring'''
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , stride=lowercase , activation=config.hidden_act ) , *[layer(lowercase , lowercase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def A ( self : List[str] , lowercase : Tensor ):
'''simple docstring'''
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : ResNetConfig ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase , config.depths[1:] ):
self.stages.append(ResNetStage(lowercase , lowercase , lowercase , depth=lowercase ) )
def A ( self : str , lowercase : Tensor , lowercase : bool = False , lowercase : bool = True ):
'''simple docstring'''
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowercase )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowercase , hidden_states=lowercase , )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ResNetConfig
_UpperCAmelCase : Tuple = "resnet"
_UpperCAmelCase : Optional[Any] = "pixel_values"
_UpperCAmelCase : Dict = True
def A ( self : List[str] , lowercase : Dict ):
'''simple docstring'''
if isinstance(lowercase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowercase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def A ( self : Tuple , lowercase : List[Any] , lowercase : Optional[Any]=False ):
'''simple docstring'''
if isinstance(lowercase , lowercase ):
_snake_case = value
_lowerCamelCase : str = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCamelCase : int = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : Any ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
_snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self : Union[str, Any] , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowercase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase : int ):
'''simple docstring'''
super().__init__(lowercase )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowercase )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self : Union[str, Any] , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[torch.LongTensor] = None , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowercase )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowercase , lowercase )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowercase , lowercase )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " ,UpperCAmelCase ,)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowercase : Union[str, Any] ):
'''simple docstring'''
super().__init__(lowercase )
super()._init_backbone(lowercase )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowercase )
_snake_case = ResNetEncoder(lowercase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@replace_return_docstrings(output_type=lowercase , config_class=_CONFIG_FOR_DOC )
def A ( self : Dict , lowercase : Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None ):
'''simple docstring'''
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowercase )
_snake_case = self.encoder(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowercase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowercase , )
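# A minimal usage sketch for the image-classification head defined above. It is
# not part of the original file; it assumes the public "microsoft/resnet-50"
# checkpoint and network access to the sample COCO image.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, ResNetForImageClassification

image = Image.open(
    requests.get('http://images.cocodataset.org/val2017/000000039769.jpg', stream=True).raw
)
processor = AutoImageProcessor.from_pretrained('microsoft/resnet-50')
model = ResNetForImageClassification.from_pretrained('microsoft/resnet-50')
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors='pt')).logits
# The classifier maps the pooled features to config.num_labels logits.
print(model.config.id2label[logits.argmax(-1).item()])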
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[str] = "poolformer"
def __init__( self : Dict , lowercase : str=3 , lowercase : int=16 , lowercase : Dict=16 , lowercase : Optional[int]=3 , lowercase : Union[str, Any]=4.0 , lowercase : Tuple=[2, 2, 6, 2] , lowercase : str=[64, 128, 320, 512] , lowercase : str=[7, 3, 3, 3] , lowercase : Dict=[4, 2, 2, 2] , lowercase : Dict=[2, 1, 1, 1] , lowercase : Union[str, Any]=4 , lowercase : Optional[int]=0.0 , lowercase : List[Any]="gelu" , lowercase : Optional[int]=True , lowercase : Tuple=1E-5 , lowercase : int=0.02 , **lowercase : Dict , ):
'''simple docstring'''
_snake_case = num_channels
_snake_case = patch_size
_snake_case = stride
_snake_case = padding
_snake_case = pool_size
_snake_case = hidden_sizes
_snake_case = mlp_ratio
_snake_case = depths
_snake_case = patch_sizes
_snake_case = strides
_snake_case = num_encoder_blocks
_snake_case = drop_path_rate
_snake_case = hidden_act
_snake_case = use_layer_scale
_snake_case = layer_scale_init_value
_snake_case = initializer_range
super().__init__(**lowercase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : str = version.parse("1.11" )
@property
def A ( self : Dict ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Dict ):
'''simple docstring'''
return 2E-3
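# A short sketch instantiating the configuration above; the keyword values
# repeat this file's own defaults and are shown only for illustration.
from transformers import PoolFormerConfig

config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2])
print(config.model_type)          # poolformer
print(config.num_encoder_blocks)  # 4, the default from the signature above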
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
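# A simplified sketch of the lazy-import pattern the module above relies on
# (not the real transformers._LazyModule): attribute access triggers the
# underlying submodule import on first use, keeping package import cheap.
import importlib
import types


class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module(f'.{self._attr_to_submodule[attr]}', self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value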
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
_lowerCamelCase : Optional[int] = logging.getLogger()
def a_ ( __lowercase : str ) -> Dict:
_snake_case = os.path.join(__lowercase , 'all_results.json' )
if os.path.exists(_snake_case ):
with open(_snake_case , 'r' ) as f:
return json.load(f )
raise ValueError(f'''can\'t find {_snake_case}''' )
_lowerCamelCase : Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def A ( self : str ):
'''simple docstring'''
import xla_spawn
_snake_case = self.get_auto_remove_tmp_dir()
_snake_case = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(lowercase , 'argv' , lowercase ):
_snake_case = time()
xla_spawn.main()
_snake_case = time()
_snake_case = get_results(lowercase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def A ( self : Union[str, Any] ):
'''simple docstring'''
import xla_spawn
_snake_case = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
with patch.object(lowercase , 'argv' , lowercase ):
xla_spawn.main()
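# The argv-patching pattern the tests above use, shown in isolation: any
# main() that reads sys.argv can be driven in-process, with no subprocess.
import sys
from unittest.mock import patch


def fake_main():
    print('launched with:', sys.argv[1:])


with patch.object(sys, 'argv', ['xla_spawn.py', '--num_cores=8', 'run_glue.py']):
    fake_main()  # launched with: ['--num_cores=8', 'run_glue.py']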
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def a_ ( __lowercase : Union[str, Any] ) -> List[Any]:
_snake_case = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
_snake_case = DetaConfig(
backbone_config=__lowercase , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=__lowercase , with_box_refine=__lowercase , two_stage=__lowercase , )
# set labels
_snake_case = 'huggingface/label-files'
if "o365" in model_name:
_snake_case = 366
_snake_case = 'object365-id2label.json'
else:
_snake_case = 91
_snake_case = 'coco-detection-id2label.json'
_snake_case = num_labels
_snake_case = json.load(open(cached_download(hf_hub_url(__lowercase , __lowercase , repo_type='dataset' ) ) , 'r' ) )
_snake_case = {int(__lowercase ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
return config
def a_ ( __lowercase : int ) -> str:
_snake_case = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def a_ ( __lowercase : str , __lowercase : Tuple , __lowercase : str ) -> Union[str, Any]:
_snake_case = dct.pop(__lowercase )
_snake_case = val
def a_ ( __lowercase : List[str] , __lowercase : str ) -> Dict:
_snake_case = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_snake_case = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
_snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[:dim, :]
_snake_case = in_proj_bias[: dim]
_snake_case = in_proj_weight[
dim : dim * 2, :
]
_snake_case = in_proj_bias[
dim : dim * 2
]
_snake_case = in_proj_weight[
-dim :, :
]
_snake_case = in_proj_bias[-dim :]
# fmt: on
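# The fused-projection split performed above, in miniature: a (3 * dim, dim)
# qkv weight splits row-wise into equal query, key and value blocks, exactly
# the slicing applied to in_proj_weight in the function above.
import torch

dim = 4
in_proj_weight = torch.randn(3 * dim, dim)
q, k, v = in_proj_weight[:dim, :], in_proj_weight[dim : dim * 2, :], in_proj_weight[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)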
def a_ ( __lowercase : Dict , __lowercase : Dict ) -> str:
# transformer decoder self-attention layers
_snake_case = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[:hidden_size, :]
_snake_case = in_proj_bias[:hidden_size]
_snake_case = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_snake_case = in_proj_bias[hidden_size : hidden_size * 2]
_snake_case = in_proj_weight[-hidden_size:, :]
_snake_case = in_proj_bias[-hidden_size:]
def a_ ( ) -> List[str]:
_snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_snake_case = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def a_ ( __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Tuple ) -> Optional[Any]:
_snake_case = get_deta_config(__lowercase )
# load original state dict
if model_name == "deta-swin-large":
_snake_case = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
_snake_case = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
_snake_case = torch.load(__lowercase , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(__lowercase , param.shape )
# rename keys
_snake_case = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_swin_q_k_v(__lowercase , config.backbone_config )
read_in_decoder_q_k_v(__lowercase , __lowercase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
if "input_proj" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
# finally, create HuggingFace model and load state dict
_snake_case = DetaForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
_snake_case = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(__lowercase )
# load image processor
_snake_case = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
_snake_case = prepare_img()
_snake_case = processor(images=__lowercase , return_tensors='pt' )
_snake_case = encoding['pixel_values']
_snake_case = model(pixel_values.to(__lowercase ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_snake_case = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
_snake_case = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
_snake_case = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
_snake_case = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__lowercase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__lowercase ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'''jozhang97/{model_name}''' )
processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
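# The pop-and-reinsert renaming idiom the conversion above is built on, shown
# standalone with a toy state dict; the key strings are illustrative only.
toy_state_dict = {'backbone.0.body.patch_embed.norm.weight': 0.5, 'untouched.key': 1.0}
toy_renames = [('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight')]
for src, dest in toy_renames:
    toy_state_dict[dest] = toy_state_dict.pop(src)
print(sorted(toy_state_dict))  # ['model.backbone.model.embeddings.norm.weight', 'untouched.key']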
def a_ ( __lowercase : int ) -> int:
if not isinstance(__lowercase , __lowercase ):
raise TypeError('Input value must be an \'int\' type' )
_snake_case = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
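# Quick sanity checks for the helper above: the result is the 1-indexed
# position of the highest set bit, and 0 when the input is 0.
assert a_(1) == 1    # 0b1
assert a_(8) == 4    # 0b1000
assert a_(17) == 5   # 0b10001
assert a_(0) == 0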
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_lowerCamelCase : Dict = '''pt'''
elif is_tf_available():
_lowerCamelCase : List[str] = '''tf'''
else:
_lowerCamelCase : List[Any] = '''jax'''
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = PerceiverTokenizer
_UpperCAmelCase : Optional[int] = False
def A ( self : Tuple ):
'''simple docstring'''
super().setUp()
_snake_case = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A ( self : str ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def A ( self : Optional[int] , **lowercase : Dict ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def A ( self : Optional[int] , lowercase : Tuple , lowercase : Optional[Any]=False , lowercase : int=20 , lowercase : Optional[int]=5 ):
'''simple docstring'''
_snake_case = []
for i in range(len(lowercase ) ):
try:
_snake_case = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_snake_case = list(filter(lambda lowercase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , lowercase ) )
_snake_case = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
_snake_case = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
_snake_case = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case = [t[0] for t in toks]
# Ensure consistency
_snake_case = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
_snake_case = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
_snake_case = ' ' + output_txt
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = 'Unicode €.'
_snake_case = tokenizer(lowercase )
_snake_case = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]Unicode €.[SEP]' )
_snake_case = tokenizer('e è é ê ë' )
_snake_case = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_snake_case = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_snake_case = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
self.assertIsInstance(lowercase , lowercase )
if FRAMEWORK != "jax":
_snake_case = list(batch.input_ids.numpy()[0] )
else:
_snake_case = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowercase )
self.assertIn('attention_mask' , lowercase )
self.assertNotIn('decoder_input_ids' , lowercase )
self.assertNotIn('decoder_attention_mask' , lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
_snake_case = [
'Summary of the text.',
'Another summary.',
]
_snake_case = tokenizer(
text_target=lowercase , max_length=32 , padding='max_length' , truncation=lowercase , return_tensors=lowercase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
_snake_case = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_snake_case = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_snake_case = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
_snake_case = [f'''<extra_id_{i}>''' for i in range(125 )]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
_snake_case = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case = tokenizer_class.from_pretrained(
lowercase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowercase )]
_snake_case = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : List[str] ):
'''simple docstring'''
pass
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : int ):
'''simple docstring'''
_snake_case = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
_snake_case = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(lowercase , lowercase )
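# A standalone sketch of the byte-level scheme the tests above exercise,
# assuming the public "deepmind/language-perceiver" checkpoint: each UTF-8
# byte is shifted by 6 to leave room for special tokens, so 'e' (byte 101)
# becomes id 107, with [CLS] = 4 and [SEP] = 5 as in the expected id lists.
from transformers import PerceiverTokenizer

tok = PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')
print(tok('e').input_ids)                       # [4, 107, 5]
print(tok.decode(tok('Unicode €.').input_ids))  # [CLS]Unicode €.[SEP]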